Python onnx.load() Examples

The following are 29 code examples of onnx.load(). You can trace each snippet back to its original project and source file via the attribution line above it, or browse the other available functions and classes of the onnx module.
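As a quick orientation, here is a minimal sketch of the workflow most of the snippets below follow: load a serialized model, validate it, and inspect its graph. The filename is a hypothetical placeholder.

import onnx

# Load a serialized ModelProto from disk (hypothetical path).
model = onnx.load("model.onnx")

# Check that the IR is well formed.
onnx.checker.check_model(model)

# Inspect the graph: a human-readable dump plus input/output names.
print(onnx.helper.printable_graph(model.graph))
print([i.name for i in model.graph.input])
print([o.name for o in model.graph.output])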
Example #1
Source File: test_export_testcase.py    From onnx-chainer with MIT License
def test_output_grad(tmpdir, model, x, train, disable_experimental_warning):
    path = str(tmpdir)
    export_testcase(model, (x,), path, output_grad=True, train=train)

    model_filename = os.path.join(path, 'model.onnx')
    assert os.path.isfile(model_filename)
    assert os.path.isfile(os.path.join(path, 'test_data_set_0', 'input_0.pb'))
    assert os.path.isfile(os.path.join(path, 'test_data_set_0', 'output_0.pb'))

    onnx_model = onnx.load(model_filename)
    initializer_names = {i.name for i in onnx_model.graph.initializer}

    # 12 gradient files should be there
    for i in range(12):
        tensor_filename = os.path.join(
            path, 'test_data_set_0', 'gradient_{}.pb'.format(i))
        assert os.path.isfile(tensor_filename)
        tensor = onnx.load_tensor(tensor_filename)
        assert tensor.name.startswith('param_')
        assert tensor.name in initializer_names
    assert not os.path.isfile(
        os.path.join(path, 'test_data_set_0', 'gradient_12.pb')) 
Example #2
Source File: test_export_testcase.py    From chainer with MIT License
def test_output_grad(tmpdir, model, x, train, disable_experimental_warning):
    path = str(tmpdir)
    export_testcase(model, (x,), path, output_grad=True, train=train)

    model_filename = os.path.join(path, 'model.onnx')
    assert os.path.isfile(model_filename)
    assert os.path.isfile(os.path.join(path, 'test_data_set_0', 'input_0.pb'))
    assert os.path.isfile(os.path.join(path, 'test_data_set_0', 'output_0.pb'))

    onnx_model = onnx.load(model_filename)
    initializer_names = {i.name for i in onnx_model.graph.initializer}

    # 12 gradient files should be there
    for i in range(12):
        tensor_filename = os.path.join(
            path, 'test_data_set_0', 'gradient_{}.pb'.format(i))
        assert os.path.isfile(tensor_filename)
        tensor = onnx.load_tensor(tensor_filename)
        assert tensor.name.startswith('param_')
        assert tensor.name in initializer_names
    assert not os.path.isfile(
        os.path.join(path, 'test_data_set_0', 'gradient_12.pb')) 
Example #3
Source File: neural_style.py    From PyTorch with MIT License
def stylize_onnx_caffe2(content_image, args):
    """
    Read ONNX model and run it using Caffe2
    """

    assert not args.export_onnx

    import onnx
    import onnx_caffe2.backend

    model = onnx.load(args.model)

    prepared_backend = onnx_caffe2.backend.prepare(model, device='CUDA' if args.cuda else 'CPU')
    inp = {model.graph.input[0].name: content_image.numpy()}
    c2_out = prepared_backend.run(inp)[0]

    return torch.from_numpy(c2_out) 
Example #4
Source File: onnx.py    From utensor_cgen with Apache License 2.0
def parse(self, onnx_file, output_nodes=None, model_name=None):
    tf.disable_eager_execution()
    if model_name:
      graph_name = model_name
    else:
      graph_name, _ = os.path.splitext(
        os.path.basename(onnx_file)
      )
    tf.reset_default_graph()
    model = onnx.load(onnx_file)
    onnx_graph = model.graph
    ugraph = uTensorGraph(
      name=graph_name,
      output_nodes=[],
      lib_name='onnx',
      ops_info={},
    )
    self._build_graph(onnx_graph, ugraph)
    ugraph = Legalizer.legalize(ugraph)
    tf.reset_default_graph()
    return ugraph 
Example #5
Source File: model.py    From lumin with Apache License 2.0
def from_save(cls, name:str, model_builder:ModelBuilder) -> AbsModel:
        r'''
        Instantiate a :class:`~lumin.nn.models.model.Model` and load saved state from file.
        
        Arguments:
            name: name of file containing saved state
            model_builder: :class:`~lumin.nn.models.model_builder.ModelBuilder` which was used to construct the network
        
        Returns:
            Instantiated :class:`~lumin.nn.models.model.Model` with network weights, optimiser state, and input mask loaded from saved state
        
        Examples::
            >>> model = Model.from_save('weights/model.h5', model_builder)
        '''

        m = cls(model_builder)
        m.load(name)
        return m 
Example #6
Source File: model.py    From lumin with Apache License 2.0
def load(self, name:str, model_builder:ModelBuilder=None) -> None:
        r'''
        Load model, optimiser, and input mask states from file

        Arguments:
            name: name of save file
            model_builder: if :class:`~lumin.nn.models.model.Model` was not initialised with a :class:`~lumin.nn.models.model_builder.ModelBuilder`, you will need to pass one here
        '''

        # TODO: update map location when device choice is changeable by user

        if model_builder is not None: self.model, self.opt, self.loss, self.input_mask = model_builder.get_model()
        state = torch.load(name, map_location='cuda' if torch.cuda.is_available() else 'cpu')
        self.model.load_state_dict(state['model'])
        self.opt.load_state_dict(state['opt'])
        self.input_mask = state['input_mask']
        self.objective = self.model_builder.objective if model_builder is None else model_builder.objective 
Example #7
Source File: model.py    From lumin with Apache License 2.0
def export2tfpb(self, name:str, bs:int=1) -> None:
        r'''
        Export network to Tensorflow ProtocolBuffer format, via ONNX.
        Note that ONNX expects a fixed batch size (bs), which is the number of datapoints you wish to pass through the model concurrently.

        Arguments:
            name: filename for exported file
            bs: batch size for exported models
        '''

        import onnx
        from onnx_tf.backend import prepare
        warnings.warn("""Tensorflow ProtocolBuffer export of LUMIN models (via ONNX) has not been fully explored or sufficiently tested yet.
                         Please use with caution, and report any trouble""")
        self.export2onnx(name, bs)
        m = onnx.load(f'{name}.onnx')
        tf_rep = prepare(m)
        tf_rep.export_graph(f'{name}.pb') 
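Given the docstring's note that ONNX fixes the batch size at export time, usage might look like the sketch below; the filename stem and batch size are hypothetical.

# Writes 'stylenet.onnx', then converts it to 'stylenet.pb' via onnx-tf.
# bs must match the batch size you plan to use at inference time.
model.export2tfpb('stylenet', bs=1)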
Example #8
Source File: traceSaver.py    From bonnetal with MIT License
def export_ONNX(self):
    # convert to ONNX traced model

    # create profile
    onnx_path = os.path.join(self.new_path, "model.onnx")
    with torch.no_grad():
      print("Profiling model")
      print("saving model in ", onnx_path)
      torch.onnx.export(self.model, self.dummy_input, onnx_path)

    # check that it worked
    print("Checking that it all worked out")
    model_onnx = onnx.load(onnx_path)
    onnx.checker.check_model(model_onnx)

    # Print a human readable representation of the graph
    # print(onnx.helper.printable_graph(model_onnx.graph)) 
Example #9
Source File: _backend.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def _get_onnx_outputs_info(model):  # type: (...) -> Dict[Text, EdgeInfo]
    """
    Takes in an onnx model and returns a dictionary
    of onnx output names mapped to a tuple that is (output_name, type, shape)
    """
    if isinstance(model, _string_types):
        onnx_model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        onnx_model = model
    else:
        # Without this branch, an unsupported type would surface as a NameError below.
        raise TypeError("model must be a file path or an onnx.ModelProto")

    graph = onnx_model.graph
    onnx_output_dict = {}
    for o in graph.output:
        out = _input_from_onnx_input(o)
        onnx_output_dict[out[0]] = out
    return onnx_output_dict 
Example #10
Source File: tensorrt_onnx_infer.py    From nutonomy_pointpillars with MIT License
def tensorrt_backend_rpn_onnx():

    rpn_input_features = np.ones([1, 64, 496, 432], dtype=np.float32)

    rpn_start_time = time.time()

    rpn_model = onnx.load("rpn.onnx")
    engine = backend.prepare(rpn_model, device="CUDA:0", max_batch_size=1)

    # run 1000 inferences so the per-run average printed below is correct
    for i in range(1000):
        rpn_outputs = engine.run(rpn_input_features)

    rpn_end_time = time.time()

    print('rpn inference time is : ', (rpn_end_time - rpn_start_time)/1000)
    print(rpn_outputs) 
Example #11
Source File: converter.py    From onnx-tensorflow with Apache License 2.0
def convert(infile, outfile, **kwargs):
  """Convert pb.

  Args:
    infile: Input path.
    outfile: Output path.
    **kwargs: Other args for converting.

  Returns:
    None.
  """
  logging_level = kwargs.get("logging_level", "INFO")
  common.logger.setLevel(logging_level)
  common.logger.handlers[0].setLevel(logging_level)

  common.logger.info("Start converting onnx pb to tf pb:")
  onnx_model = onnx.load(infile)
  tf_rep = backend.prepare(onnx_model, **kwargs)
  tf_rep.export_graph(outfile)
  common.logger.info("Converting completes successfully.") 
Example #12
Source File: modelwrapper.py    From finn with BSD 3-Clause "New" or "Revised" License
def __init__(self, onnx_model_proto, make_deepcopy=False):
        """Creates a ModelWrapper instance.
        onnx_model_proto can be either a ModelProto instance, or a string
        with the path to a stored .onnx file on disk, or serialized bytes.
        The make_deepcopy option controls whether a deep copy of the ModelProto
        is made internally.
        """
        if isinstance(onnx_model_proto, str):
            self._model_proto = onnx.load(onnx_model_proto)
        elif isinstance(onnx_model_proto, bytes):
            self._model_proto = onnx.load_from_string(onnx_model_proto)
        else:
            if make_deepcopy:
                self._model_proto = copy.deepcopy(onnx_model_proto)
            else:
                self._model_proto = onnx_model_proto 
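Since the docstring says the constructor accepts a ModelProto, a file path, or serialized bytes, a usage sketch could look like this (the path is hypothetical); all three forms should yield an equivalent wrapper.

# From a path on disk.
w1 = ModelWrapper("model.onnx")

# From serialized bytes.
with open("model.onnx", "rb") as f:
    w2 = ModelWrapper(f.read())

# From an in-memory ModelProto, deep-copied so the caller's object is not shared.
w3 = ModelWrapper(onnx.load("model.onnx"), make_deepcopy=True)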
Example #13
Source File: _backend.py    From onnx-coreml with MIT License
def _get_onnx_outputs_info(model): # type: (...) -> Dict[Text, EdgeInfo]
    """
    Takes in an onnx model and returns a dictionary 
    of onnx output names mapped to a tuple that is (output_name, type, shape)
    """
    if isinstance(model, str):
        onnx_model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        onnx_model = model
    else:
        # Without this branch, an unsupported type would surface as a NameError below.
        raise TypeError("model must be a file path or an onnx.ModelProto")

    graph = onnx_model.graph
    onnx_output_dict = {}
    for o in graph.output:
        out = _input_from_onnx_input(o)
        onnx_output_dict[out[0]] = out
    return onnx_output_dict 
Example #14
Source File: common_caffe2.py    From optimized-models with Apache License 2.0
def LoadLabels(label_file):
    """load labels from file"""
    if not os.path.isfile(label_file):
        logging.error("Can not find lable file {}.".format(label_file))
        return None
    labels = {}
    with open(label_file) as l:
        label_lines = [line.rstrip('\n') for line in l.readlines()]
    for line in label_lines:
        result, code = line.partition(" ")[::2]
        if code and result:
            result = result.strip()
            result = result[result.index("/")+1:]
            if result in labels:
                logging.warning("Repeated name {0} for code {1}in label file. Ignored!"
                                .format(result, code))
            else:
                labels[result] = int(code.strip())
    return labels 
Example #15
Source File: common_caffe2.py    From optimized-models with Apache License 2.0
def LoadValidation(validation_file):
    """load validation file"""
    if not os.path.isfile(validation_file):
        logging.error("Can not find validation file {}."
                      .format(validation_file))
        return None
    validation = {}
    with open(validation_file) as v:
        validation_lines = [line.rstrip('\n') for line in v.readlines()]
    for line in validation_lines:
        name, code = line.partition(" ")[::2]
        if name and code:
            name = name.strip()
            if name in validation:
                logging.warning("Repeated name {0} for code {1} in"
                                " validation file. Ignored!"
                                .format(name, code))
            else:
                validation[name] = int(code.strip())
    return validation 
Example #16
Source File: __init__.py    From onnx-mxnet with Apache License 2.0
def import_model(model_file):
    """Imports the supplied ONNX model file into MXNet symbol and parameters.

    Parameters
    ----------
    model_file : ONNX model file name

    Returns
    -------
    sym : mx.symbol
        Compatible mxnet symbol

    params : dict of str to mx.ndarray
        Dict of converted parameters stored in mx.ndarray format
    """
    graph = GraphProto()

    # loads model file and returns ONNX protobuf object
    model_proto = onnx.load(model_file)
    sym, params = graph.from_onnx(model_proto.graph)
    return sym, params 
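A hedged usage sketch for the converter above (the filename is hypothetical); the returned pair plugs straight into MXNet's symbolic APIs.

# sym is an mx.symbol, params a dict of str -> mx.ndarray.
sym, params = import_model('resnet.onnx')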
Example #17
Source File: neural_style.py    From examples with BSD 3-Clause "New" or "Revised" License
def stylize_onnx_caffe2(content_image, args):
    """
    Read ONNX model and run it using Caffe2
    """

    assert not args.export_onnx

    import onnx
    import onnx_caffe2.backend

    model = onnx.load(args.model)

    prepared_backend = onnx_caffe2.backend.prepare(model, device='CUDA' if args.cuda else 'CPU')
    inp = {model.graph.input[0].name: content_image.numpy()}
    c2_out = prepared_backend.run(inp)[0]

    return torch.from_numpy(c2_out) 
Example #18
Source File: input_rewriter_test.py    From chainer-compiler with MIT License
def test_rewrite_onnx_testdir():
    input_rewriter.rewrite_onnx_testdir(
        'out/backprop_test_mnist_mlp',
        'out/backprop_test_mnist_mlp_fp64',
        [input_rewriter.Type(dtype=np.float64),
         input_rewriter.Type(dtype=np.float64)])
    xmodel = onnx.load('out/backprop_test_mnist_mlp_fp64/model.onnx')
    xgraph = xmodel.graph

    # 11 == onnx.TensorProto.DOUBLE, i.e. float64
    assert 11 == xgraph.input[0].type.tensor_type.elem_type
    assert 11 == xgraph.input[1].type.tensor_type.elem_type
    assert 11 == xgraph.output[0].type.tensor_type.elem_type
    for init in xgraph.initializer:
        assert 11 == init.data_type

    for tensor_proto in glob.glob(
            'out/backprop_test_mnist_mlp_fp64/test_data_set_0/*.pb'):
        xtensor = onnx.load_tensor(tensor_proto)
        assert 11 == xtensor.data_type 
Example #19
Source File: convert_onnx.py    From inference with Apache License 2.0
def onnx_inference(args):
    # Load the ONNX model
    model = onnx.load("models/deepspeech_{}.onnx".format(args.continue_from))

    # Check that the IR is well formed
    onnx.checker.check_model(model)

    print(onnx.helper.printable_graph(model.graph))

    print("model checked, preparing backend!")
    rep = backend.prepare(model, device="CPU")  # or "CUDA:0"

    print("running inference!")

    # Hard coded input dim
    inputs = np.random.randn(16, 1, 161, 129).astype(np.float32)

    start = time.time()
    outputs = rep.run(inputs)
    print("time used: {}".format(time.time() - start))
    # To run networks with more than one input, pass a tuple
    # rather than a single numpy ndarray.
    print(outputs[0]) 
Example #20
Source File: test_onnx_model.py    From python-dlpy with Apache License 2.0
def test_model9(self):
        try:
            import onnx
        except:
            unittest.TestCase.skipTest(self, "onnx not found in the libraries")

        m = onnx.load(os.path.join(os.path.dirname(__file__), 'datasources', 'pytorch_net1.onnx'))
        model1 = Model.from_onnx_model(self.s, m, offsets=[1, 1, 1,], scale=2, std='std')
        model1.print_summary() 
Example #21
Source File: test_onnx_model.py    From python-dlpy with Apache License 2.0
def test_model5(self):
        try:
            import onnx
        except:
            unittest.TestCase.skipTest(self, "onnx not found in the libraries")

        model1 = Sequential(self.s, model_table='Simple_CNN1')
        model1.add(InputLayer(3, 224, 224))
        model1.add(Conv2d(8, 7, act='identity', include_bias=False))
        model1.add(BN(act='relu'))
        model1.add(Pooling(2))
        model1.add(Conv2d(8, 7, act='identity', include_bias=False))
        model1.add(BN(act='relu'))
        model1.add(Pooling(2))
        model1.add(Conv2d(8, 7))
        model1.add(Conv2d(8, 7))
        model1.add(Dense(2))
        model1.add(OutputLayer(act='softmax', n=2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")

        caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')

        self.s.table.loadtable(caslib=caslib,
                               casout={'name': 'eee', 'replace': True},
                               path=path)

        r = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=1)
        self.assertTrue(r.severity == 0)

        import tempfile
        tmp_dir_to_dump = tempfile.gettempdir()

        model1.deploy(tmp_dir_to_dump, output_format='onnx')

        import os
        os.remove(os.path.join(tmp_dir_to_dump, "Simple_CNN1.onnx"))

        if (caslib is not None) and tmp_caslib:
            self.s.retrieve('table.dropcaslib', message_level='error', caslib=caslib) 
Example #22
Source File: test_onnx_model.py    From python-dlpy with Apache License 2.0
def test_model1(self):
        try:
            import onnx
        except:
            unittest.TestCase.skipTest(self, "onnx not found in the libraries")

        model1 = Sequential(self.s, model_table='Simple_CNN1')
        model1.add(InputLayer(3, 224, 224))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(Dense(16))
        model1.add(OutputLayer(act='softmax', n=2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")

        caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')

        self.s.table.loadtable(caslib=caslib,
                               casout={'name': 'eee', 'replace': True},
                               path=path)

        r = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=1)
        self.assertTrue(r.severity == 0)

        import tempfile
        tmp_dir_to_dump = tempfile.gettempdir()

        model1.deploy(tmp_dir_to_dump, output_format='onnx')

        import os
        os.remove(os.path.join(tmp_dir_to_dump, "Simple_CNN1.onnx"))

        if (caslib is not None) and tmp_caslib:
            self.s.retrieve('table.dropcaslib', message_level='error', caslib=caslib) 
Example #23
Source File: tests_helper.py    From sklearn-onnx with MIT License
def stat_model_onnx(model):
    """
    Computes statistics on the ONNX model.
    """
    import onnx

    gr = onnx.load(model)
    return {"nb_onnx_nodes": len(gr.graph.node)} 
Example #24
Source File: tests_helper.py    From sklearn-onnx with MIT License
def stat_model_skl(model):
    """
    Computes statistics on the sklearn model.
    """
    try:
        with open(model, "rb") as f:
            obj = pickle.load(f)
    except EOFError:
        return {"nb_estimators": 0}
    return {"nb_estimators": get_nb_skl_objects(obj)} 
Example #25
Source File: test_onnx_model.py    From python-dlpy with Apache License 2.0
def test_model7(self):
        try:
            import onnx
        except:
            unittest.TestCase.skipTest(self, "onnx not found in the libraries")

        m = onnx.load(os.path.join(os.path.dirname(__file__), 'datasources', 'model.onnx'))
        model1 = Model.from_onnx_model(self.s, m, offsets=[1, 1, 1,], scale=2, std='std')
        model1.print_summary() 
Example #26
Source File: service.py    From honk with MIT License
def __init__(self, onnx_filename, labels):
        self.labels = labels
        self.model_filename = onnx_filename
        self.audio_processor = AudioPreprocessor()
        self._graph = onnx.load(onnx_filename)
        self._in_name = self._graph.graph.input[0].name
        self.model = onnx_caffe2.backend.prepare(self._graph) 
Example #27
Source File: tensorrt_loaders.py    From NeMo with Apache License 2.0
def __call__(self):
        logging.info("Loading {:}".format(self.path))
        return self.check(onnx.load(self.path)) 
Example #28
Source File: tensorrt_loaders.py    From NeMo with Apache License 2.0
def __init__(self, path):
        """
        Loads an ONNX model from a file.

        Args:
            path (str): The path from which to load the model.
        """
        self.path = path 
Example #29
Source File: neural_style.py    From examples with BSD 3-Clause "New" or "Revised" License
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    if args.model.endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            if args.export_onnx:
                assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
                output = torch.onnx._export(style_model, content_image, args.export_onnx).cpu()
            else:
                output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0])