Python onnx.helper.make_tensor() Examples

The following are 30 code examples of onnx.helper.make_tensor(), drawn from open-source projects. The original project and source file are noted above each example so you can follow it back to its full context. You may also want to look at the other functions and classes available in the onnx.helper module.
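Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the tensor, node, and graph names are illustrative only) of the typical pattern: make_tensor() builds a TensorProto that is attached to a graph as an initializer, usually alongside a matching make_tensor_value_info() entry.

import numpy as np
import onnx
from onnx import TensorProto, helper

# Illustrative weight data; the names 'W', 'X', 'Y' are arbitrary.
weight = np.arange(6, dtype=np.float32).reshape(2, 3)

# make_tensor(name, data_type, dims, vals) returns a TensorProto.
w_init = helper.make_tensor(
    name='W', data_type=TensorProto.FLOAT,
    dims=weight.shape, vals=weight.flatten().tolist())

# The initializer feeds an Add node as its second input.
add_node = helper.make_node('Add', ['X', 'W'], ['Y'])
graph = helper.make_graph(
    [add_node], 'make_tensor_demo',
    [helper.make_tensor_value_info('X', TensorProto.FLOAT, [2, 3])],
    [helper.make_tensor_value_info('Y', TensorProto.FLOAT, [2, 3])],
    initializer=[w_init])
model = helper.make_model(graph)
onnx.checker.check_model(model)  # passes when shapes and dtypes line up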
Example #1
Source File: yolov3_to_onnx.py    From iAI with MIT License
def load_resize_scales(self, resize_params):
        """Returns the initializers with the value of the scale input
        tensor given by resize_params.

        Keyword argument:
        resize_params -- a ResizeParams object
        """
        initializer = list()
        inputs = list()
        name = resize_params.generate_param_name()
        shape = resize_params.value.shape
        data = resize_params.value
        scale_init = helper.make_tensor(
            name, TensorProto.FLOAT, shape, data)
        scale_input = helper.make_tensor_value_info(
            name, TensorProto.FLOAT, shape)
        initializer.append(scale_init)
        inputs.append(scale_input)
        return initializer, inputs 
Example #2
Source File: onnx_emitter.py    From MMdnn with MIT License
def emit_Constant(self, IR_node):
        if IR_node.get_attr('value'):
            value = 'np.array({}, dtype=np.float32)'.format(IR_node.get_attr('value'))
            self.add_body(1, "{:15} = {}".format(
                IR_node.variable_name + '_value_array',
                value))
        else:
            self.add_body(1, "{:15} = __weights_dict['{}']['value']".format(
                IR_node.variable_name + '_value_array',
                IR_node.name))
        self.add_body(1, "{:15} = helper.make_node('Constant', inputs=[], outputs=['{}'], value=helper.make_tensor(name='const_tensor', data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[{}.dtype], dims={}.shape, vals={}.flatten().astype(float)), name='{}')".format(
                          IR_node.variable_name,
                          IR_node.variable_name,
                          IR_node.variable_name + '_value_array',
                          IR_node.variable_name + '_value_array',
                          IR_node.variable_name + '_value_array',
                          IR_node.variable_name))
        self.nodes.append(IR_node.variable_name) 
Example #3
Source File: helper_test.py    From training_results_v0.6 with Apache License 2.0
def test_attr_repeated_tensor_proto(self):  # type: () -> None
        tensors = [
            helper.make_tensor(
                name='a',
                data_type=TensorProto.FLOAT,
                dims=(1,),
                vals=np.ones(1).tolist()
            ),
            helper.make_tensor(
                name='b',
                data_type=TensorProto.FLOAT,
                dims=(1,),
                vals=np.ones(1).tolist()
            )]
        attr = helper.make_attribute("tensors", tensors)
        self.assertEqual(attr.name, "tensors")
        self.assertEqual(list(attr.tensors), tensors)
        checker.check_attribute(attr) 
Example #4
Source File: onnx_emitter.py    From MMdnn with MIT License
def emit_Mul(self, IR_node):
        inputs = ', '.join("'" + self.IR_graph.get_node(i).real_variable_name + "'" for i in IR_node.in_edges)
        
        if IR_node.name in self.weights_dict and 'weights' in self.weights_dict[IR_node.name]:
            self.add_body(1,"{:15} = np.array([__weights_dict['{}']['weights']])".format(
                IR_node.variable_name+'_weight_array',
                IR_node.name
            ))
            self.add_body(1, "{:15} = helper.make_node('Constant', inputs=[], outputs=['{}'], value=helper.make_tensor(name='const_tensor', data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[{}.dtype], dims={}.shape, vals={}), name='{}')".format(
                    IR_node.variable_name + '_weight',
                    IR_node.variable_name + '_weight',
                    IR_node.variable_name + '_weight_array',
                    IR_node.variable_name + '_weight_array',
                    IR_node.variable_name + '_weight_array',
                    IR_node.variable_name + '_weight'
                    ))
            inputs += ', '+''.join("'"+IR_node.variable_name +"_weight'")
            self.nodes.append(IR_node.variable_name+'_weight')

        self.add_body(1, "{:15} = helper.make_node('Mul', inputs=[{}], outputs=['{}'], broadcast=1, name='{}')".format(
            IR_node.variable_name,
            inputs,
            IR_node.variable_name,
            IR_node.variable_name))
        self.nodes.append(IR_node.variable_name) 
Example #5
Source File: onnx_emitter.py    From MMdnn with MIT License
def emit_Reshape(self, IR_node):
        shape = [item if item != -1 else 1 for item in IR_node.get_attr('shape')]
        if len(shape) == 4:
            shape = [shape[i] for i in [0, 3, 1, 2]]
        shape_str = ', '.join('%s' % i for i in shape)
        self.add_body(1, "{:15} = np.array([{}], dtype=np.int64)".format(
            IR_node.variable_name + '_shape_array',
            shape_str
        ))
        self.add_body(1, "{:15} = helper.make_node('Constant', inputs=[], outputs=['{}'], value=helper.make_tensor(name='const_tensor', data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[{}.dtype], dims={}.shape, vals={}), name='{}')".format(
                          IR_node.variable_name + '_shape',
                          IR_node.variable_name + '_shape',
                          IR_node.variable_name + '_shape_array',
                          IR_node.variable_name + '_shape_array',
                          IR_node.variable_name + '_shape_array',
                          IR_node.variable_name + '_shape'))
        self.add_body(1, "{:15} = helper.make_node('Reshape', inputs=['{}', '{}'], outputs=['{}'], name='{}')".format(
            IR_node.variable_name,
            self.parent_variable_name(IR_node),
            IR_node.variable_name + '_shape',
            IR_node.variable_name,
            IR_node.variable_name))
        self.nodes.append(IR_node.variable_name + '_shape')
        self.nodes.append(IR_node.variable_name) 
Example #6
Source File: yolov3_to_onnx.py    From tensorrt_demos with MIT License
def load_upsample_scales(self, upsample_params):
        """Returns the initializers with the value of the scale input
        tensor given by upsample_params.

        Keyword argument:
        upsample_params -- a UpsampleParams object
        """
        initializer = list()
        inputs = list()
        name = upsample_params.generate_param_name()
        shape = upsample_params.value.shape
        data = upsample_params.value
        scale_init = helper.make_tensor(
            name, TensorProto.FLOAT, shape, data)
        scale_input = helper.make_tensor_value_info(
            name, TensorProto.FLOAT, shape)
        initializer.append(scale_init)
        inputs.append(scale_input)
        return initializer, inputs 
Example #7
Source File: yolov3_to_onnx.py    From tensorrt_demos with MIT License
def _create_param_tensors(self, conv_params, param_category, suffix):
        """Creates the initializers with weights from the weights file together with
        the input tensors.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name, param_data, param_data_shape = self._load_one_param_type(
            conv_params, param_category, suffix)

        initializer_tensor = helper.make_tensor(
            param_name, TensorProto.FLOAT, param_data_shape, param_data)
        input_tensor = helper.make_tensor_value_info(
            param_name, TensorProto.FLOAT, param_data_shape)
        return initializer_tensor, input_tensor 
Example #8
Source File: optimizer_test.py    From training_results_v0.6 with Apache License 2.0
def test_eliminate_unused_initializer_no_eliminate_output(self):  # type: () -> None
        add = helper.make_node("Add", ["X", "Y"], ["Z"])
        graph = helper.make_graph(
            [add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
            [helper.make_tensor("A", TensorProto.FLOAT,
                                dims=(2, 3),
                                vals=np.random.randn(2, 3).astype(np.float32).tobytes(),
                                raw=True)])
        optimized_model = self._optimized(graph, ["eliminate_unused_initializer"])

        assert len(list(optimized_model.graph.initializer)) == 1
        assert "Z" in [o.name for o in optimized_model.graph.output] 
Example #9
Source File: optimizer_test.py    From training_results_v0.6 with Apache License 2.0
def test_eliminate_unused_initializer_input(self):  # type: () -> None
        add = helper.make_node("Add", ["X", "Y"], ["Z"])
        graph = helper.make_graph(
            [add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
            [helper.make_tensor("A", TensorProto.FLOAT,
                                dims=(2, 3),
                                vals=np.random.randn(2, 3).astype(np.float32).tobytes(),
                                raw=True)])
        optimized_model = self._optimized(graph, ["eliminate_unused_initializer"])

        assert len(list(optimized_model.graph.initializer)) == 0
        assert len(optimized_model.graph.input) == 2 
Example #10
Source File: yolo2onnx.py    From MIT-Driverless-CV-TrainingInfra with Apache License 2.0
def _create_param_tensors(self, conv_params, param_category, suffix):
        """Creates the initializers with weights from the weights file together with
        the input tensors.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name, param_data, param_data_shape = self._load_one_param_type(
            conv_params, param_category, suffix)

        initializer_tensor = helper.make_tensor(param_name, TensorProto.FLOAT, param_data_shape, param_data)
        input_tensor = helper.make_tensor_value_info(param_name, TensorProto.FLOAT, param_data_shape)
        return initializer_tensor, input_tensor 
Example #11
Source File: optimizer_test.py    From training_results_v0.6 with Apache License 2.0
def _make_fake_if_op(self,
                         true_nodes,  # type: Sequence[NodeProto]
                         false_nodes,  # type: Sequence[NodeProto]
                         output_types  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                         ):  # type: (...) -> List[NodeProto]
        true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
        true_graph = helper.make_graph(true_nodes, "true_graph", [], [])
        false_graph = helper.make_graph(false_nodes, "false_graph", [], [])
        if_inputs = ["condition"]
        if_outputs = [name for _, _, name in output_types]
        retval_nodes = [
            helper.make_node("Constant", [], ["condition"], value=true),
            helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
                             else_branch=false_graph)
        ]
        return retval_nodes

    # fn is a function that takes a single node as argument 
Example #12
Source File: yolov3_to_onnx.py    From yolov3-tensorrt with MIT License
def _create_param_tensors(self, conv_params, param_category, suffix):
        """Creates the initializers with weights from the weights file together with
        the input tensors.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name, param_data, param_data_shape = self._load_one_param_type(
            conv_params, param_category, suffix)

        initializer_tensor = helper.make_tensor(
            param_name, TensorProto.FLOAT, param_data_shape, param_data)
        input_tensor = helper.make_tensor_value_info(
            param_name, TensorProto.FLOAT, param_data_shape)
        return initializer_tensor, input_tensor 
Example #13
Source File: backend.py    From training_results_v0.6 with Apache License 2.0
def make_node_test_model(node, inputs, use_weights=True):
    # HACK TODO: The output info is unknown here; not sure what the best solution is
    output_dtype = np.float32 # Dummy value only
    output_shape = [-99]      # Dummy value only
    graph_inputs = [onnx_helper.make_tensor_value_info(
        name, np2onnx_dtype(array.dtype), array.shape)
                    for name, array in zip(node.input, inputs)]
    graph_outputs = [onnx_helper.make_tensor_value_info(
        name, np2onnx_dtype(output_dtype), output_shape)
                     for name in node.output]
    if use_weights:
        # Add initializers for all inputs except the first
        initializers = [onnx_helper.make_tensor(
            name, np2onnx_dtype(array.dtype), array.shape, array.flatten().tolist())
                        for name, array in zip(node.input[1:], inputs[1:])]
    else:
        initializers = []
    graph = onnx_helper.make_graph(
           [node], "RunNodeGraph_" + node.op_type,
           graph_inputs, graph_outputs, initializer=initializers)
    model = onnx_helper.make_model(graph)
    return model 
Example #14
Source File: test_model_wrappers.py    From ngraph-python with Apache License 2.0
def test_attribute_wrapper():
    def attribute_value_test(attribute_value):
        node = make_node('Abs', ['X'], [], name='test_node', test_attribute=attribute_value)
        model = make_model(make_graph([node], 'test_graph', [
            make_tensor_value_info('X', onnx.TensorProto.FLOAT, [1, 2]),
        ], []), producer_name='ngraph')
        wrapped_attribute = ModelWrapper(model).graph.node[0].get_attribute('test_attribute')
        return wrapped_attribute.get_value()

    tensor = make_tensor('test_tensor', onnx.TensorProto.FLOAT, [1], [1])

    assert attribute_value_test(1) == 1
    assert type(attribute_value_test(1)) == np.long
    assert attribute_value_test(1.0) == 1.0
    assert type(attribute_value_test(1.0)) == np.float
    assert attribute_value_test('test') == 'test'
    assert attribute_value_test(tensor)._proto == tensor

    assert attribute_value_test([1, 2, 3]) == [1, 2, 3]
    assert attribute_value_test([1.0, 2.0, 3.0]) == [1.0, 2.0, 3.0]
    assert attribute_value_test(['test1', 'test2']) == ['test1', 'test2']
    assert attribute_value_test([tensor, tensor])[1]._proto == tensor 
Example #15
Source File: streamingmaxpool_batch.py    From finn with BSD 3-Clause "New" or "Revised" License
def make_shape_compatible_op(self, model):
        exp_ishape = self.get_normal_input_shape()
        oshape = self.get_normal_output_shape()
        ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
        assert ishape == exp_ishape, "Unexpected input shape for StreamingMaxPool."
        # implement tensor with correct shape
        values = np.random.randn(*oshape).astype(np.float32)
        return helper.make_node(
            "Constant",
            inputs=[],
            outputs=[self.onnx_node.output[0]],
            value=helper.make_tensor(
                name="const_tensor",
                data_type=TensorProto.FLOAT,
                dims=values.shape,
                vals=values.flatten().astype(float),
            ),
        ) 
Example #16
Source File: streamingdatawidthconverter_batch.py    From finn with BSD 3-Clause "New" or "Revised" License
def make_shape_compatible_op(self, model):
        exp_ishape = self.get_normal_input_shape()
        oshape = self.get_normal_output_shape()
        ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
        assert ishape == tuple(exp_ishape), "Unexpected input shape for StreamingDWC."
        # implement tensor with correct shape
        values = np.random.randn(*oshape).astype(np.float32)
        return helper.make_node(
            "Constant",
            inputs=[],
            outputs=[self.onnx_node.output[0]],
            value=helper.make_tensor(
                name="const_tensor",
                data_type=TensorProto.FLOAT,
                dims=values.shape,
                vals=values.flatten().astype(float),
            ),
        ) 
Example #17
Source File: convolutioninputgenerator.py    From finn with BSD 3-Clause "New" or "Revised" License
def make_shape_compatible_op(self, model):
        exp_ishape = self.get_normal_input_shape()
        oshape = self.get_normal_output_shape()
        ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
        assert ishape == exp_ishape, "Unexpected input shape for ConvInpGen."
        # implement tensor with correct shape
        values = np.random.randn(*oshape).astype(np.float32)
        return helper.make_node(
            "Constant",
            inputs=[],
            outputs=[self.onnx_node.output[0]],
            value=helper.make_tensor(
                name="const_tensor",
                data_type=TensorProto.FLOAT,
                dims=values.shape,
                vals=values.flatten().astype(float),
            ),
        ) 
Example #18
Source File: yolov3_to_onnx.py    From iAI with MIT License
def _create_param_tensors(self, conv_params, param_category, suffix):
        """Creates the initializers with weights from the weights file together with
        the input tensors.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name, param_data, param_data_shape = self._load_one_param_type(
            conv_params, param_category, suffix)

        initializer_tensor = helper.make_tensor(
            param_name, TensorProto.FLOAT, param_data_shape, param_data)
        input_tensor = helper.make_tensor_value_info(
            param_name, TensorProto.FLOAT, param_data_shape)
        return initializer_tensor, input_tensor 
Example #19
Source File: pb_wrapper.py    From onnx-tensorflow with Apache License 2.0
def add_const_proto_explicit(self,
                               name,
                               value,
                               np_dtype=None,
                               tf_dtype=None,
                               onnx_dtype=None):
    onnx_dtype = any_dtype_to_onnx_dtype(
        np_dtype=np_dtype, tf_dtype=tf_dtype, onnx_dtype=onnx_dtype)

    const_dim = len(value.shape)

    if const_dim == 0:
      raw_values = [value.tolist()]
      values = [value]
    else:
      raw_values = value.flatten().tolist()
      values = value

    shape = np.array(values).shape
    const_proto = make_tensor(
        name=name, data_type=onnx_dtype, dims=shape, vals=raw_values)
    self._consts_proto.append(const_proto) 
Example #20
Source File: yolov3_to_onnx.py    From iAI with MIT License
def _create_param_tensors(self, conv_params, param_category, suffix):
        """Creates the initializers with weights from the weights file together with
        the input tensors.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name, param_data, param_data_shape = self._load_one_param_type(
            conv_params, param_category, suffix)

        initializer_tensor = helper.make_tensor(
            param_name, TensorProto.FLOAT, param_data_shape, param_data)
        input_tensor = helper.make_tensor_value_info(
            param_name, TensorProto.FLOAT, param_data_shape)
        return initializer_tensor, input_tensor 
Example #21
Source File: test_opt.py    From onnxconverter-common with MIT License
def test_fan_in(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)),
                              name="0")]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'], name="1")]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'], name="2")]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'], name="3")]
        nodes[4:] = [helper.make_node('LeakyRelu', ['max0'], ['leak0'], name="4")]
        nodes[5:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak1'], name="5")]
        nodes[6:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak2'], name="6")]
        nodes[7:] = [helper.make_node('Transpose', ['leak1'], ['tranpose0'], perm=[0, 2, 3, 1], name="7")]
        nodes[8:] = [helper.make_node('Transpose', ['leak2'], ['tranpose1'], perm=[0, 2, 3, 1], name="8")]
        nodes[9:] = [helper.make_node('Add', ['tranpose0', 'tranpose1'], ['add0'], name="9")]
        nodes[10:] = [helper.make_node('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2], name="10")]
        nodes[11:] = [helper.make_node('Conv', ['tranpose2'], ['output0'], name="11")]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertEqual(len(new_nodes), 6)
        self.assertIsNotNone(model) 
Example #22
Source File: mx2onnx_converter_functions.py    From mxnet_to_onnx with Apache License 2.0
def convert_weights_and_inputs(node, **kwargs):
    name = node["name"]
    if looks_like_weight(name):
        weights = kwargs["weights"]
        initializer = kwargs["initializer"]
        np_arr = weights[name]
        data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype]
        dims = np.shape(np_arr)

        tensor_node = helper.make_tensor_value_info(name, data_type, dims)

        initializer.append(
            helper.make_tensor(
                name=name, 
                data_type=data_type,
                dims=dims,
                vals=np_arr.flatten().tolist(),
                raw=False,
            )
        )

        return tensor_node
    else:
        tval_node = helper.make_tensor_value_info(name, kwargs["in_type"], kwargs["in_shape"])
        return tval_node 
Example #23
Source File: test_opt.py    From onnxconverter-common with MIT License
def test_merge_common(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)),
                              name="0")]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'], name="1")]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'], name="2")]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'], name="3")]
        nodes[4:] = [helper.make_node('LeakyRelu', ['max0'], ['leak0'], name="4")]
        nodes[5:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak1'], name="5")]
        nodes[6:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak2'], name="6")]
        nodes[7:] = [helper.make_node('Cast', ['leak1'], ['cast0'], to=6, name="7")]
        nodes[8:] = [helper.make_node('Cast', ['cast0'], ['cast1'], to=1, name="8")]
        nodes[9:] = [helper.make_node('Cast', ['leak2'], ['cast2'], to=6, name="9")]
        nodes[10:] = [helper.make_node('Cast', ['cast2'], ['cast3'], to=7, name="10")]
        nodes[11:] = [helper.make_node('Cast', ['cast3'], ['cast4'], to=1, name="11")]
        nodes[12:] = [helper.make_node('Add', ['cast1', 'cast4'], ['add0'], name="12")]
        nodes[13:] = [helper.make_node('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2], name="13")]
        nodes[14:] = [helper.make_node('Conv', ['tranpose2'], ['output0'], name="14")]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertEqual(len(new_nodes), 11)
        self.assertIsNotNone(model) 
Example #24
Source File: test_opt.py    From onnxconverter-common with MIT License
def test_merge(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)))]
        nodes[1:] = [helper.make_node('Max', ['input1'], ['max0'])]
        nodes[2:] = [helper.make_node('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])]
        nodes[3:] = [helper.make_node('Transpose', ['tranpose0'], ['add_input1'], perm=(0, 3, 1, 2))]
        nodes[4:] = [helper.make_node('Add', ['max0', 'add_input1'], ['output0'])]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertEqual(len(new_nodes), 3)
        self.assertIsNotNone(model) 
Example #25
Source File: test_opt.py    From onnxconverter-common with MIT License
def test_move_transpose(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)))]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'])]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'])]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'])]
        nodes[4:] = [helper.make_node('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])]
        nodes[5:] = [helper.make_node('LeakyRelu', ['tranpose0'], ['tranpose1'])]
        nodes[6:] = [helper.make_node('Relu', ['tranpose1'], ['output0'], perm=(0, 3, 1, 2))]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        self.assertEqual(len(new_nodes), 5)
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertIsNotNone(model) 
Example #26
Source File: test_opt.py    From onnxconverter-common with MIT License
def test_NextToOutputSolution(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)),
                              name="0")]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'], name="1")]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'], name="2")]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'], name="3")]
        nodes[4:] = [helper.make_node('Identity', ['max0'], ['output0'], name="4")]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test_NextToOutputSolution', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        graph = helper.make_graph(new_nodes, 'test_NextToOutputSolution', [input0], [output0])
        model = helper.make_model(graph)
        self.assertEqual(len(new_nodes), 2)
        self.assertIsNotNone(model) 
Example #27
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_expand():

    def _test_expand(name, data, shape, ref_data):
        shape_array = np.array(shape)
        shape_node = onnx.helper.make_node('Constant',
                                    inputs=[],
                                    outputs=['shape'],
                                    value=onnx.helper.make_tensor(name = 'const_tensor',
                                                                  data_type = onnx.TensorProto.INT32,
                                                                  dims = shape_array.shape,
                                                                  vals = shape_array.flatten().astype('int32')))
        expand_node = helper.make_node("Expand", ["in", "shape"], ["out"])

        graph = helper.make_graph([shape_node, expand_node],
                                "expand_test",
                                inputs = [helper.make_tensor_value_info("in",
                                                TensorProto.FLOAT, list(data.shape))],
                                outputs = [helper.make_tensor_value_info("out",
                                                TensorProto.FLOAT, list(ref_data.shape))])

        model = helper.make_model(graph, producer_name=name)

        for target, ctx in ctx_list():
            tvm_out = get_tvm_output(model, data, target, ctx, ref_data.shape, 'float32')

        tvm.testing.assert_allclose(ref_data, tvm_out)

    in_shape = (3, 1)
    shape = (3, 4)
    data = np.random.uniform(size=in_shape).astype(np.float32)
    ref_data = np.tile(data, 4)
    _test_expand('expand_with_dim_unchanged_test', data, shape, ref_data)

    in_shape = (3, 1)
    shape = (2, 1, 6)
    data = np.random.uniform(size=in_shape).astype(np.float32)
    ref_data = data * np.ones(shape, dtype=np.float32)
    _test_expand('expand_with_dim_changed_test', data, shape, ref_data) 
Example #28
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_reshape():
    in_shape = (4, 3, 3, 4)
    ref_shape = (6, 2, 4, 3)

    ref_array = np.array(ref_shape)
    ref_node = onnx.helper.make_node('Constant',
                                     inputs=[],
                                     outputs=['ref_in'],
                                     value=onnx.helper.make_tensor(name='const_tensor',
                                                                   data_type=onnx.TensorProto.INT32,
                                                                   dims=ref_array.shape,
                                                                   vals=ref_array.flatten().astype(int)))
    reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])

    graph = helper.make_graph([ref_node, reshape_node],
                              "reshape_test",
                              inputs=[helper.make_tensor_value_info("in",
                                                                    TensorProto.FLOAT, list(in_shape))],
                              outputs=[helper.make_tensor_value_info("out",
                                                                     TensorProto.FLOAT, list(ref_shape))])

    model = helper.make_model(graph, producer_name='reshape_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('int32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32')

    tvm.testing.assert_allclose(ref_shape, tvm_out.shape) 
Example #29
Source File: ada_boost.py    From sklearn-onnx with MIT License
def _apply_gather_elements(scope, container, inputs, output, axis,
                           dim, zero_type, suffix):
    if container.target_opset >= 11:
        container.add_node(
            'GatherElements', inputs, output, op_version=11, axis=axis,
            name=scope.get_unique_operator_name('GatEls' + suffix))
    else:
        classes_ind_name = scope.get_unique_variable_name('classes_ind2')
        container.add_initializer(
            classes_ind_name, onnx_proto.TensorProto.INT64,
            (1, dim), list(range(dim)))

        shape_name = scope.get_unique_variable_name('shape')
        container.add_node(
            'Shape', inputs[0], shape_name,
            name=scope.get_unique_operator_name('Shape'))
        zero_name = scope.get_unique_variable_name('zero')
        zero_val = (0 if zero_type == onnx_proto.TensorProto.INT64
                    else 0.)
        container.add_node(
            'ConstantOfShape', shape_name, zero_name,
            name=scope.get_unique_operator_name('CoSA'),
            value=make_tensor("value", zero_type,
                              (1, ), [zero_val]), op_version=9)

        equal_name = scope.get_unique_variable_name('equal')
        container.add_node('Equal', [inputs[1], classes_ind_name],
                           equal_name,
                           name=scope.get_unique_operator_name('Equal'))

        selected = scope.get_unique_variable_name('selected')
        container.add_node('Where', [equal_name, inputs[0], zero_name],
                           selected,
                           name=scope.get_unique_operator_name('Where'))
        container.add_node('ReduceSum', selected, output, axes=[1],
                           name=scope.get_unique_operator_name('ReduceSum')) 
Example #30
Source File: backend.py    From SNIPER-mxnet with Apache License 2.0
def make_graph(node, inputs):
        """ Created ONNX GraphProto from node"""
        initializer = []
        tensor_input_info = []
        tensor_output_info = []

        # Adding input tensor info.
        for index in range(len(node.input)):
            tensor_input_info.append(
                helper.make_tensor_value_info(str(node.input[index]), TensorProto.FLOAT, [1]))

            # Creating an initializer for Weight params.
            # Assumes that weight params is named as 'W'.
            if node.input[index] == 'W':
                dim = inputs[index].shape
                param_tensor = helper.make_tensor(
                    name=node.input[index],
                    data_type=TensorProto.FLOAT,
                    dims=dim,
                    vals=inputs[index].flatten())

                initializer.append(param_tensor)

        # Adding output tensor info.
        for index in range(len(node.output)):
            tensor_output_info.append(
                helper.make_tensor_value_info(str(node.output[index]), TensorProto.FLOAT, [1]))

        # creating graph proto object.
        graph_proto = helper.make_graph(
            [node],
            "test",
            tensor_input_info,
            tensor_output_info,
            initializer=initializer)

        return graph_proto