Python onnx.ModelProto() Examples

The following are 30 code examples of onnx.ModelProto(). Each example lists the project and source file it was taken from. You may also want to check out all other available functions and classes of the onnx module.
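Before the examples, here is a minimal sketch (not taken from any of the projects below) of the two most common ways to obtain an onnx.ModelProto: loading a serialized .onnx file and building one programmatically with onnx.helper. The file name is a placeholder.

import onnx
from onnx import helper, TensorProto

# 1. Load an existing .onnx file from disk (placeholder path).
model = onnx.load("model.onnx")
print(type(model))  # onnx.ModelProto

# 2. Build a ModelProto programmatically from a small graph.
node = helper.make_node("Relu", inputs=["x"], outputs=["y"])
graph = helper.make_graph(
    [node], "tiny_graph",
    inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 4])],
    outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 4])],
)
model = helper.make_model(graph, producer_name="example")
onnx.checker.check_model(model)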
Example #1
Source File: __init__.py    From ngraph-onnx with Apache License 2.0 (7 votes)
def run_model(onnx_model, data_inputs):
    # type: (onnx.ModelProto, List[np.ndarray]) -> List[np.ndarray]
    """
    Convert ONNX model to an ngraph model and perform computation on input data.

    :param onnx_model: ONNX ModelProto describing an ONNX model
    :param data_inputs: list of numpy ndarrays with input data
    :return: list of numpy ndarrays with computed output
    """
    NgraphBackend.backend_name = BACKEND_NAME
    if NgraphBackend.supports_ngraph_device(NgraphBackend.backend_name):
        ng_model_function = import_onnx_model(onnx_model)
        runtime = get_runtime()
        computation = runtime.computation(ng_model_function)
        return computation(*data_inputs)
    else:
        raise RuntimeError('The requested nGraph backend <'
                           + NgraphBackend.backend_name + '> is not supported!') 
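A hypothetical usage of run_model above; the file name and input shape are placeholders, not taken from the ngraph-onnx project.

import numpy as np
import onnx

onnx_model = onnx.load("mnist.onnx")                       # placeholder path
data = [np.random.rand(1, 1, 28, 28).astype(np.float32)]   # placeholder input shape
outputs = run_model(onnx_model, data)
print(outputs[0].shape)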
Example #2
Source File: optimizer.py    From onnxconverter-common with MIT License (6 votes)
def optimize_onnx_model(origin_model, nchw_inputs=None, stop_initializers=None):
    # type: (onnx.ModelProto, list, list) -> onnx.ModelProto
    """
    Optimize an ONNX model. The origin model is updated in place by the optimization.
    :param origin_model: the onnx.ModelProto to optimize
    :param nchw_inputs: optional list of input names to treat as NCHW
    :param stop_initializers: optional list of initializers to leave untouched
    :return: the optimized model (the same object as origin_model)
    """
    graph = origin_model.graph
    nodelist = list(graph.node)

    opt_graph = optimize_onnx_graph(nodelist,
                                    nchw_inputs=nchw_inputs,
                                    inputs=graph.input,
                                    outputs=graph.output,
                                    initializers=list(graph.initializer),
                                    stop_initializers=stop_initializers,
                                    model_value_info=graph.value_info,
                                    model_name=graph.name,
                                    target_opset=next(opset_.version for opset_ in origin_model.opset_import
                                                      if opset_.domain == '' or opset_.domain == 'ai.onnx'))

    origin_model.graph.CopyFrom(opt_graph)
    return origin_model 
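A hypothetical usage of optimize_onnx_model above: the model is optimized in place and the returned object can be saved with the standard onnx API. File names are placeholders.

import onnx

model = onnx.load("model.onnx")                 # placeholder path
optimized = optimize_onnx_model(model)          # same object, graph rewritten in place
onnx.save_model(optimized, "model_opt.onnx")    # placeholder path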
Example #3
Source File: _backend.py    From coremltools with BSD 3-Clause "New" or "Revised" License (6 votes)
def _get_onnx_outputs_info(model):  # type: (...) -> Dict[Text, EdgeInfo]
    """
    Takes in an onnx model and returns a dictionary
    of onnx output names mapped to a tuple that is (output_name, type, shape)
    """
    if isinstance(model, _string_types):
        onnx_model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        onnx_model = model
    else:
        raise TypeError("Expected a file path or an onnx.ModelProto instance")

    graph = onnx_model.graph
    onnx_output_dict = {}
    for o in graph.output:
        out = _input_from_onnx_input(o)
        onnx_output_dict[out[0]] = out
    return onnx_output_dict 
Example #4
Source File: _backend.py    From coremltools with BSD 3-Clause "New" or "Revised" License (6 votes)
def prepare(
        cls,
        model,  # type: ModelProto
        device="CPU",  # type: Text
        minimum_ios_deployment_target="12",  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> CoreMLRep
        super(CoreMLBackend, cls).prepare(model, device, **kwargs)
        if DEBUG:
            with open("/tmp/node_model.onnx", "wb") as f:
                s = model.SerializeToString()
                f.write(s)
        coreml_model = convert(
            model, minimum_ios_deployment_target=minimum_ios_deployment_target
        )
        if DEBUG:
            coreml_model.save("/tmp/node_model.mlmodel")
        onnx_outputs_info = _get_onnx_outputs_info(model)
        return CoreMLRep(
            coreml_model,
            onnx_outputs_info,
            device == "CPU",
            minimum_ios_deployment_target=minimum_ios_deployment_target,
        ) 
Example #5
Source File: _backend.py    From coremltools with BSD 3-Clause "New" or "Revised" License (6 votes)
def prepare(
        cls,
        model,  # type: ModelProto
        device="CPU",  # type: Text
        minimum_ios_deployment_target="13",  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> CoreMLRep
        super(CoreMLBackendND, cls).prepare(model, device, **kwargs)
        if DEBUG:
            with open("/tmp/node_model.onnx", "wb") as f:
                s = model.SerializeToString()
                f.write(s)
        coreml_model = convert(
            model, minimum_ios_deployment_target=minimum_ios_deployment_target
        )
        if DEBUG:
            coreml_model.save("/tmp/node_model.mlmodel")
        onnx_outputs_info = _get_onnx_outputs_info(model)
        return CoreMLRep(
            coreml_model,
            onnx_outputs_info,
            device == "CPU",
            minimum_ios_deployment_target=minimum_ios_deployment_target,
        ) 
Example #6
Source File: _backend.py    From coremltools with BSD 3-Clause "New" or "Revised" License (6 votes)
def is_compatible(
        cls,
        model,  # type: ModelProto
        device="CPU",  # type: Text
        **kwargs  # type: Any
    ):  # type: (...) -> bool
        # Return whether the model is compatible with CoreML.
        """
        This function will gradually grow to cover more cases.
        Be careful of false negatives: some models that seemingly are not
        supported on CoreML can be optimized by the graph transformer into
        a graph that converts to CoreML.

        Unsupported ops: if the graph contains an unsupported op, return False.
        """
        # TODO: add unsupported ops
        unsupported_ops = []
        graph = model.graph
        for node in graph.node:
            if node.op_type in unsupported_ops:
                return False
        return True 
Example #7
Source File: onnx_model_artifact.py    From BentoML with Apache License 2.0 (6 votes)
def save(self, dst):
        if self._onnx_model_path:
            shutil.copyfile(
                self._onnx_model_path, self.spec._saved_model_file_path(dst)
            )
        elif self._model_proto:
            try:
                import onnx
            except ImportError:
                raise MissingDependencyException(
                    '"onnx" package is required for packing with OnnxModelArtifact'
                )
            onnx.save_model(self._model_proto, self.spec._saved_model_file_path(dst))
        else:
            raise InvalidArgument(
                'onnx.ModelProto or a model file path is required to pack an '
                'OnnxModelArtifact'
            ) 
Example #8
Source File: onnx_translator.py    From eran with Apache License 2.0 (6 votes)
def __init__(self, model):
		"""
		This constructor takes a reference to an ONNX model, runs the ONNX checker on it,
		infers intermediate shapes, and sets up maps from names to types and to nodes or
		constant values. This leaves us with the nodes needed for inference.
		
		Arguments
		---------
		model : onnx.ModelProto
		"""
		if issubclass(model.__class__, onnx.ModelProto):
			onnx.checker.check_model(model)
			self.model = model
			self.nodes = self.model.graph.node

			self.shape_map, self.constants_map, self.output_node_map, self.input_node_map, self.placeholdernames = prepare_model(model)
		else:
			assert 0, 'not onnx model' 
Example #9
Source File: _backend.py    From onnx-coreml with MIT License (6 votes)
def is_compatible(cls,
                       model,  # type: ModelProto
                       device='CPU',  # type: Text
                       **kwargs  # type: Any
                       ):  # type: (...) -> bool
        # Return whether the model is compatible with CoreML.
        '''
        This function will gradually grow to cover more cases.
        Be careful of false negatives: some models that seemingly are not
        supported on CoreML can be optimized by the graph transformer into
        a graph that converts to CoreML.

        Unsupported ops: if the graph contains an unsupported op, return False.
        '''
        # TODO: add unsupported ops
        unsupported_ops = []
        graph = model.graph
        for node in graph.node:
            if node.op_type in unsupported_ops:
                return False
        return True 
Example #10
Source File: _backend.py    From onnx-coreml with MIT License (6 votes)
def prepare(cls,
                model,  # type: ModelProto
                device='CPU',  # type: Text
                minimum_ios_deployment_target='13', # type: str
                **kwargs  # type: Any
                ):
        # type: (...) -> CoreMLRep
        super(CoreMLBackendND, cls).prepare(model, device, **kwargs)
        if DEBUG:
            with open('/tmp/node_model.onnx', 'wb') as f:
                s = model.SerializeToString()
                f.write(s)
        coreml_model = convert(model, minimum_ios_deployment_target=minimum_ios_deployment_target)
        if DEBUG:
            coreml_model.save('/tmp/node_model.mlmodel')
        onnx_outputs_info = _get_onnx_outputs_info(model)
        return CoreMLRep(coreml_model, onnx_outputs_info, device == 'CPU', minimum_ios_deployment_target=minimum_ios_deployment_target) 
Example #11
Source File: _backend.py    From onnx-coreml with MIT License (6 votes)
def prepare(cls,
                model,  # type: ModelProto
                device='CPU',  # type: Text
                minimum_ios_deployment_target='12', # type: str
                **kwargs  # type: Any
                ):
        # type: (...) -> CoreMLRep
        super(CoreMLBackend, cls).prepare(model, device, **kwargs)
        if DEBUG:
            with open('/tmp/node_model.onnx', 'wb') as f:
                s = model.SerializeToString()
                f.write(s)
        coreml_model = convert(model, minimum_ios_deployment_target=minimum_ios_deployment_target)
        if DEBUG:
            coreml_model.save('/tmp/node_model.mlmodel')
        onnx_outputs_info = _get_onnx_outputs_info(model)
        return CoreMLRep(coreml_model, onnx_outputs_info, device == 'CPU', minimum_ios_deployment_target=minimum_ios_deployment_target) 
Example #12
Source File: _backend.py    From onnx-coreml with MIT License (6 votes)
def _get_onnx_outputs_info(model): # type: (...) -> Dict[Text, EdgeInfo]
    """
    Takes in an onnx model and returns a dictionary 
    of onnx output names mapped to a tuple that is (output_name, type, shape)
    """
    if isinstance(model, str):
        onnx_model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        onnx_model = model
    else:
        raise TypeError("Expected a file path or an onnx.ModelProto instance")

    graph = onnx_model.graph
    onnx_output_dict = {}
    for o in graph.output:
        out = _input_from_onnx_input(o)
        onnx_output_dict[out[0]] = out
    return onnx_output_dict 
Example #13
Source File: onnx_converters.py    From chainer-compiler with MIT License (6 votes)
def generate_model(self, inputs, outputs, graph, model) -> 'ModelProto':

        # assign param names
        self.param2name = {id(p): 'param' + n.replace('/', '_')
                           for n, p in model.namedparams()}

        for p, n in self.param2name.items():
            assigned_names.append(n)

        # assign onnx name
        assign_onnx_name(graph)

        graph_ = self.generate_graph(inputs, outputs, graph, None, True)
        onnx_model = oh.make_model(
            graph_, producer_name="elichika", producer_version="0.1")
        return onnx_model 
Example #14
Source File: backend.py    From dragon with BSD 2-Clause "Simplified" License (6 votes)
def prepare(cls, model, device='CUDA:0', **kwargs):
        """Build a TensorRT engine from the onnx model.

        Parameters
        ----------
        model : onnx.ModelProto
            The onnx model.
        device : str, optional
            The executing device.

        Returns
        -------
        dragon.vm.tensorrt.ONNXBackendRep
            The backend rep.

        """
        if not isinstance(device, Device):
            device = Device(device)
        return ONNXBackendRep(model, device, **kwargs) 
Example #15
Source File: backend.py    From dragon with BSD 2-Clause "Simplified" License (6 votes)
def run_model(cls, model, inputs, device='CUDA:0', **kwargs):
        """Build and run a TensorRT engine from the onnx model.

        Parameters
        ----------
        model : onnx.ModelProto
            The onnx model.
        inputs : Union[Sequence, Dict]
            The input arrays.
        device : str, optional
            The executing device.

        Returns
        -------
        namedtuple
            The model outputs.

        """
        return cls.prepare(model, device, **kwargs).run(inputs) 
Example #16
Source File: parser.py    From deep500 with BSD 3-Clause "New" or "Revised" License (5 votes)
def parse_model(onnx_model: onnx.ModelProto) -> OnnxModel:
    """
    @param onnx_model Existing onnx.ModelProto model
    @return parsed model
    """
    return OnnxModel.create_from_onnx_model(onnx_model) 
Example #17
Source File: parser.py    From deep500 with BSD 3-Clause "New" or "Revised" License (5 votes)
def load_and_parse_binary(binary: bytes) -> OnnxModel:
    model = onnx.load_from_string(binary)
    model = OnnxModel.create_from_onnx_model(model)
    model = clean_model(model)
    return model 
Example #18
Source File: parser.py    From deep500 with BSD 3-Clause "New" or "Revised" License (5 votes)
def load_model_only(path: str) -> onnx.ModelProto:
    """
    @param path path to the file
    @return deserialized onnx model
    """
    with open(path, 'rb') as model_file:
        binary = model_file.read()
    model = onnx.load_from_string(binary)
    return model 
Example #19
Source File: onnx_helper.py    From minionn with Apache License 2.0 (5 votes)
def stripModelFromPrivateData(m):
    """
    Strips all private data from the given model and returns a copy.
    This usually includes all tensors, i.e. all weights w and biases b.
    """
    # Create new model and copy all from m
    privatizedModel = onnx.ModelProto()
    privatizedModel.CopyFrom(m)

    # Clear the tensors from the model
    del privatizedModel.graph.initializer[:]

    # Return the privatized model
    return privatizedModel 
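A hypothetical usage of stripModelFromPrivateData above: the returned copy keeps the graph structure but drops the weight tensors, while the original model is left untouched. The file name is a placeholder.

import onnx

model = onnx.load("model_with_weights.onnx")    # placeholder path
public_model = stripModelFromPrivateData(model)
print(len(model.graph.initializer))             # original still holds its weights
print(len(public_model.graph.initializer))      # 0 after stripping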
Example #20
Source File: tensorrt_loaders.py    From NeMo with Apache License 2.0 (5 votes)
def __init__(self, onnx_loader, explicit_precision=None):
        """
        Parses an ONNX model to create an engine.

        Args:
            onnx_loader (Callable() -> onnx.ModelProto): A loader that can supply an ONNX model.

        Optional Args:
            explicit_precision (bool): Whether to create the network with explicit precision enabled.
        """
        self.onnx_loader = onnx_loader
        self.explicit_precision = default_value(explicit_precision, False) 
Example #21
Source File: onnx_model_artifact.py    From BentoML with Apache License 2.0 (5 votes)
def _get_onnx_inference_session(self):
        if self.spec.backend == "onnxruntime":
            try:
                import onnxruntime
            except ImportError:
                raise MissingDependencyException(
                    '"onnxruntime" package is required for inferencing with onnx '
                    'runtime as backend'
                )

            if self._model_proto:
                logger.info(
                    "Initializing onnxruntime InferenceSession with onnx.ModelProto "
                    "instance"
                )
                return onnxruntime.InferenceSession(
                    self._model_proto.SerializeToString()
                )
            elif self._onnx_model_path:
                logger.info(
                    "Initializing onnxruntime InferenceSession from onnx file:"
                    f"'{self._onnx_model_path}'"
                )
                return onnxruntime.InferenceSession(self._onnx_model_path)
            else:
                raise BentoMLException("OnnxModelArtifact in bad state")
        else:
            raise BentoMLException(
                f'"{self.spec.backend}" runtime is currently not supported for '
                f'OnnxModelArtifact'
            ) 
Example #22
Source File: onnx_model_artifact.py    From BentoML with Apache License 2.0 (5 votes)
def __init__(self, spec, path_or_model_proto):
        """
        :param spec: parent OnnxModelArtifact
        :param path_or_model_proto: .onnx file path or onnx.ModelProto object
        """
        super(_OnnxModelArtifactWrapper, self).__init__(spec)

        self._inference_session = None

        self._onnx_model_path = None
        self._model_proto = None
        if _is_onnx_model_file(path_or_model_proto):
            self._onnx_model_path = path_or_model_proto
        else:
            try:
                import onnx

                if isinstance(path_or_model_proto, onnx.ModelProto):
                    self._model_proto = path_or_model_proto
                else:
                    raise InvalidArgument(
                        'onnx.ModelProto or a .onnx model file path is required to '
                        'pack an OnnxModelArtifact'
                    )
            except ImportError:
                raise InvalidArgument(
                    'onnx.ModelProto or a .onnx model file path is required to pack '
                    'an OnnxModelArtifact'
                )

        assert self._onnx_model_path or self._model_proto, (
            "Either self._onnx_model_path or self._model_proto has to be initilaized "
            "after initializing _OnnxModelArtifactWrapper"
        ) 
Example #23
Source File: serialization.py    From dragon with BSD 2-Clause "Simplified" License (5 votes)
def load_model_from_string(s):
    if ModelProto is None:
        raise ImportError('ONNX is not installed.')
    return _deserialize(s, ModelProto()) 
Example #24
Source File: utils.py    From training_results_v0.6 with Apache License 2.0 (5 votes)
def polish_model(model):  # type: (ModelProto) -> ModelProto
    '''
        This function combines several useful utility functions together.
    '''
    onnx.checker.check_model(model)
    onnx.helper.strip_doc_string(model)
    model = onnx.shape_inference.infer_shapes(model)
    model = onnx.optimizer.optimize(model)
    onnx.checker.check_model(model)
    return model 
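A hypothetical usage of polish_model above; the file name is a placeholder. Note that onnx.optimizer was later split out into the separate onnxoptimizer package, so this helper assumes an older onnx release.

import onnx

model = onnx.load("model.onnx")     # placeholder path
model = polish_model(model)         # checked, doc strings stripped, shapes inferred, optimized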
Example #25
Source File: basic_test.py    From training_results_v0.6 with Apache License 2.0 (5 votes)
def test_version_exists(self):  # type: () -> None
        model = ModelProto()
        # When we create it, graph should not have a version string.
        self.assertFalse(model.HasField('ir_version'))
        # We should touch the version so it is annotated with the current
        # ir version of the running ONNX
        model.ir_version = IR_VERSION
        model_string = model.SerializeToString()
        model.ParseFromString(model_string)
        self.assertTrue(model.HasField('ir_version'))
        # Check if the version is correct.
        self.assertEqual(model.ir_version, IR_VERSION) 
Example #26
Source File: basic_test.py    From training_results_v0.6 with Apache License 2.0 (5 votes)
def test_existence(self):  # type: () -> None
        try:
            AttributeProto
            NodeProto
            GraphProto
            ModelProto
        except Exception as e:
            self.fail(
                'Did not find proper onnx protobufs. Error is: {}'
                .format(e)) 
Example #27
Source File: basic_test.py    From training_results_v0.6 with Apache License 2.0 (5 votes)
def test_save_and_load_model(self):  # type: () -> None
        proto = self._simple_model()
        cls = ModelProto
        proto_string = onnx._serialize(proto)

        # Test if input is string
        loaded_proto = onnx.load_model_from_string(proto_string)
        self.assertTrue(proto == loaded_proto)

        # Test if input has a read function
        f = io.BytesIO()
        onnx.save_model(proto_string, f)
        f = io.BytesIO(f.getvalue())
        loaded_proto = onnx.load_model(f, cls)
        self.assertTrue(proto == loaded_proto)

        # Test if input is a file name
        try:
            fi = tempfile.NamedTemporaryFile(delete=False)
            onnx.save_model(proto, fi)
            fi.close()

            loaded_proto = onnx.load_model(fi.name, cls)
            self.assertTrue(proto == loaded_proto)
        finally:
            os.remove(fi.name) 
Example #28
Source File: model_wrappers.py    From ngraph-python with Apache License 2.0 (5 votes)
def __init__(self, model_proto):  # type: (onnx.ModelProto) -> None
        self.graph = GraphWrapper(model_proto.graph)
        super(ModelWrapper, self).__init__(model_proto, self.graph) 
Example #29
Source File: OnnxModel.py    From FARM with Apache License 2.0 (5 votes)
def save_model_to_file(self, output_path):
        logger.info(f"Output model to {output_path}")

        if output_path.endswith(".json"):
            assert isinstance(self.model, ModelProto)
            with open(output_path, "w") as out:
                out.write(str(self.model))
        else:
            with open(output_path, "wb") as out:
                out.write(self.model.SerializeToString()) 
Example #30
Source File: importer.py    From ngraph-onnx with Apache License 2.0 (5 votes)
def import_onnx_model(onnx_protobuf):  # type: (onnx.ModelProto) -> List[Function]
    """
    Import an ONNX Protocol Buffers model and convert it into a list of ngraph Functions.

    :param onnx_protobuf: ONNX Protocol Buffers model (onnx_pb2.ModelProto object)
    :return: list of ngraph Functions representing computations for each output.
    """
    if not isinstance(onnx_protobuf, onnx.ModelProto):
        raise UserInputError('Input does not seem to be a properly formatted ONNX model.')

    return onnx_import.import_onnx_model(onnx_protobuf.SerializeToString())
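A hypothetical usage of import_onnx_model above; the file name is a placeholder.

import onnx

onnx_protobuf = onnx.load("model.onnx")          # placeholder path
ng_functions = import_onnx_model(onnx_protobuf)  # list of ngraph Functions, one per output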