Python onnx.TensorProto.INT32 Examples

The following are 30 code examples of onnx.TensorProto.INT32(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module onnx.TensorProto.
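Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two most common uses of TensorProto.INT32: declaring the element type of a value or initializer, and supplying the 'to' attribute of a Cast node. TensorProto.INT32 is simply the integer 6 from the TensorProto.DataType enum.

import onnx
from onnx import TensorProto, helper

# Cast an int64 graph input to int32; TensorProto.INT32 supplies the 'to' attribute.
x = helper.make_tensor_value_info("x", TensorProto.INT64, [2, 3])
y = helper.make_tensor_value_info("y", TensorProto.INT32, [2, 3])
cast = helper.make_node("Cast", ["x"], ["y"], to=TensorProto.INT32)

graph = helper.make_graph([cast], "int32_demo", inputs=[x], outputs=[y])
model = helper.make_model(graph, producer_name="int32_demo")
onnx.checker.check_model(model)  # Cast(int64 -> int32) is a well-formed model

# TensorProto.INT32 is also the data_type argument when building tensors:
const = helper.make_tensor("c", TensorProto.INT32, dims=[3], vals=[1, 2, 3])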
Example #1
Source File: BertOnnxModel.py    From FARM with Apache License 2.0
def cast_input_to_int32(self, input_name):
        cast_output = input_name + '_int32'

        # Avoid consecutive Cast nodes.
        inputs = [input_name]
        output_name_to_node = self.output_name_to_node()
        if input_name in output_name_to_node:
            parent_node = output_name_to_node[input_name]
            if parent_node and parent_node.op_type == 'Cast':
                inputs = [parent_node.input[0]]

        cast_node = onnx.helper.make_node('Cast', inputs=inputs, outputs=[cast_output])
        cast_node.attribute.extend([onnx.helper.make_attribute("to", int(TensorProto.INT32))])
        self.add_node(cast_node)

        return cast_output, cast_node 
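As an aside, the two-step construction above (make_node followed by attribute.extend) can be collapsed into one call, since onnx.helper.make_node turns extra keyword arguments into node attributes. A standalone sketch with hypothetical tensor names:

import onnx
from onnx import TensorProto

# Equivalent single-step construction of the same Cast node.
cast_node = onnx.helper.make_node(
    "Cast", inputs=["input_ids"], outputs=["input_ids_int32"],
    to=int(TensorProto.INT32))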
Example #2
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def verify_gather_nd(in_shape, indices, dtype):
    x = np.random.uniform(size=in_shape).astype(dtype)
    indices = np.array(indices, dtype="int32")
    out_np = topi.testing.gather_nd_python(x, indices)

    y = helper.make_node("GatherND", ['in', 'indices'], ['out'])

    graph = helper.make_graph([y],
                              'gather_test',
                              inputs=[helper.make_tensor_value_info("in",
                                                                    TensorProto.FLOAT, list(in_shape)),
                                      helper.make_tensor_value_info("indices",
                                                                    TensorProto.INT32, list(indices.shape))],
                              outputs=[helper.make_tensor_value_info("out",
                                                                     TensorProto.FLOAT, list(out_np.shape))])
    model = helper.make_model(graph, producer_name='gather_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(
            model, [x, indices], target, ctx, out_np.shape)
        tvm.testing.assert_allclose(out_np, tvm_out) 
Example #3
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def verify_scatter(in_shape, indices, axis):
    x = np.random.uniform(size=in_shape).astype("float32")
    indices = np.array(indices, dtype="int32")
    updates = np.random.uniform(size=indices.shape).astype("float32")

    y = helper.make_node("ScatterElements", ['data', 'indices', 'updates'], ['output'], axis=axis)

    graph = helper.make_graph([y],
                              'scatter_test',
                              inputs=[helper.make_tensor_value_info("data",
                                                                    TensorProto.FLOAT, list(in_shape)),
                                      helper.make_tensor_value_info("indices",
                                                                    TensorProto.INT32, list(indices.shape)),
                                      helper.make_tensor_value_info("updates",
                                                                    TensorProto.FLOAT, list(indices.shape))],
                              outputs=[helper.make_tensor_value_info("output",
                                                                     TensorProto.FLOAT, list(in_shape))])
    model = helper.make_model(graph, producer_name='scatter_test')
    onnx_out = get_onnxruntime_output(model, [x, indices, updates])

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(
            model, [x, indices, updates], target, ctx, onnx_out[0].shape)
        tvm.testing.assert_allclose(onnx_out[0], tvm_out) 
Example #4
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def verify_gather(in_shape, indices, axis, dtype):
    x = np.random.uniform(size=in_shape).astype(dtype)
    indices = np.array(indices, dtype="int32")
    out_np = np.take(x, indices, axis=axis)

    y = helper.make_node("Gather", ['in', 'indices'], ['out'], axis=axis)

    graph = helper.make_graph([y],
                              'gather_test',
                              inputs=[helper.make_tensor_value_info("in",
                                                                    TensorProto.FLOAT, list(in_shape)),
                                      helper.make_tensor_value_info("indices",
                                                                    TensorProto.INT32, list(indices.shape))],
                              outputs=[helper.make_tensor_value_info("out",
                                                                     TensorProto.FLOAT, list(out_np.shape))])
    model = helper.make_model(graph, producer_name='gather_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(
            model, [x, indices], target, ctx, out_np.shape)
        tvm.testing.assert_allclose(out_np, tvm_out) 
Example #5
Source File: test_forward.py    From training_results_v0.6 with Apache License 2.0
def verify_gather(in_shape, indices, axis, dtype):
    x = np.random.uniform(size=in_shape).astype(dtype)
    indices = np.array(indices, dtype="int32")
    out_np = np.take(x, indices, axis=axis)

    y = helper.make_node("Gather", ['in', 'indices'], ['out'], axis=axis)

    graph = helper.make_graph([y],
                              'gather_test',
                              inputs = [helper.make_tensor_value_info("in",
                                            TensorProto.FLOAT, list(in_shape)),
                                        helper.make_tensor_value_info("indices",
                                            TensorProto.INT32, list(indices.shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(out_np.shape))])
    model = helper.make_model(graph, producer_name='gather_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, indices], target, ctx, out_np.shape)
        np.testing.assert_allclose(out_np, tvm_out) 
Example #6
Source File: _converter.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def _transform_coreml_dtypes(
    builder,  # type: NeuralNetworkBuilder
    inputs,  # type: List[EdgeInfo]
    outputs,  # type: List[EdgeInfo]
):
    # type: (...) -> None

    """ Make sure ONNX input/output data types are mapped to the equivalent CoreML types
    """
    for i, input_ in enumerate(inputs):
        onnx_type = input_[1]
        if onnx_type == TensorProto.FLOAT:
            _update_multiarray_to_float32(builder.spec.description.input[i])
        elif onnx_type == TensorProto.DOUBLE:
            continue
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(builder.spec.description.input[i])
        elif onnx_type == TensorProto.BOOL:
            _update_multiarray_to_float32(builder.spec.description.input[i])
        else:
            raise TypeError("Input must be of of type FLOAT, DOUBLE, INT32 or INT64")

    for i, output_ in enumerate(outputs):
        onnx_type = output_[1]
        if onnx_type == TensorProto.FLOAT:
            _update_multiarray_to_float32(builder.spec.description.output[i])
        elif onnx_type == TensorProto.DOUBLE:
            continue
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(builder.spec.description.output[i])
        elif onnx_type == TensorProto.BOOL:
            _update_multiarray_to_float32(builder.spec.description.output[i])
        else:
            raise TypeError("Output must be of of type FLOAT, DOUBLE, INT32 or INT64") 
Example #7
Source File: test_dynamic_shape.py    From onnx-tensorflow with Apache License 2.0
def test_eye_like(self):
    if legacy_opset_pre_ver(9):
      raise unittest.SkipTest("ONNX version {} doesn't support EyeLike.".format(
          defs.onnx_opset_version()))
    shape = [6, 10]
    off_diagonal_offset = -3
    x = self._get_rnd_int(0, 100, shape=shape)
    y = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32)
    node_def = helper.make_node("EyeLike", ["x"], ["y"],
                                dtype=TensorProto.FLOAT,
                                k=off_diagonal_offset)
    graph_def = helper.make_graph(
        [node_def],
        name="test_unknown_shape",
        inputs=[
            helper.make_tensor_value_info("x", TensorProto.INT32, [None, None])
        ],
        outputs=[
            helper.make_tensor_value_info("y", TensorProto.FLOAT, [None, None])
        ])
    tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
    output = tf_rep.run({"x": x})
    np.testing.assert_equal(output["y"], y) 
Example #8
Source File: test_dynamic_shape.py    From onnx-tensorflow with Apache License 2.0
def test_gather_nd(self):
    if legacy_opset_pre_ver(11):
      raise unittest.SkipTest(
          "ONNX version {} doesn't support GatherND.".format(
              defs.onnx_opset_version()))
    # valid positive and negative indices for elements
    data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    indices = np.array([[0, 0], [1, -3]], dtype=np.int64)
    ref_output = np.array([1, 4], dtype=np.int32)
    node_def = helper.make_node("GatherND", ["data", "indices"], ["outputs"])
    graph_def = helper.make_graph(
        [node_def],
        name="test_unknown_shape",
        inputs=[
            helper.make_tensor_value_info("data", TensorProto.INT32,
                                          [None, None]),
            helper.make_tensor_value_info("indices", TensorProto.INT64,
                                          [None, None])
        ],
        outputs=[
            helper.make_tensor_value_info("outputs", TensorProto.INT32, [None])
        ])
    tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
    output = tf_rep.run({"data": data, "indices": indices})
    np.testing.assert_almost_equal(output["outputs"], ref_output) 
Example #9
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_reshape():
    in_shape = (4, 3, 3, 4)
    ref_shape = (6, 2, 4, 3)

    ref_array = np.array(ref_shape)
    ref_node = onnx.helper.make_node('Constant',
                                     inputs=[],
                                     outputs=['ref_in'],
                                     value=onnx.helper.make_tensor(name='const_tensor',
                                                                   data_type=onnx.TensorProto.INT32,
                                                                   dims=ref_array.shape,
                                                                   vals=ref_array.flatten().astype(int)))
    reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])

    graph = helper.make_graph([ref_node, reshape_node],
                              "reshape_test",
                              inputs=[helper.make_tensor_value_info("in",
                                                                    TensorProto.FLOAT, list(in_shape))],
                              outputs=[helper.make_tensor_value_info("out",
                                                                     TensorProto.FLOAT, list(ref_shape))])

    model = helper.make_model(graph, producer_name='reshape_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32')

    tvm.testing.assert_allclose(ref_shape, tvm_out.shape) 
Example #10
Source File: shape_inference_test.py    From training_results_v0.6 with Apache License 2.0
def test_constant_fill_with_input(self):  # type: () -> None
        graph = self._make_graph(
            [("X", TensorProto.FLOAT, (2, 3, 4))],
            [make_node('ConstantFill', ['X'], ['out'], dtype=TensorProto.INT32)],
            [])
        self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.INT32, (2, 3, 4))]) 
Example #11
Source File: shape_inference_test.py    From training_results_v0.6 with Apache License 2.0
def test_constant_fill_with_extra_shape(self):  # type: () -> None
        graph = self._make_graph(
            [("X", TensorProto.FLOAT, (2, 3, 4))],
            [make_node('ConstantFill', ['X'], ['out'], dtype=TensorProto.INT32, extra_shape=(5, 6))],
            [])
        self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.INT32, (2, 3, 4, 5, 6))]) 
Example #12
Source File: _operators_nd.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def _convert_cast(builder, node, graph, err):
    """
    Perform the cast operation in CoreML.
        e.g. casting from float (assumed) to an integer type maps to a Floor layer;
             for other types, a copy (linear activation) layer is added.
    """
    convert_to = node.attrs.get("to")
    convert_to_int = set(
        {
            TensorProto.UINT8,
            TensorProto.INT8,
            TensorProto.UINT16,
            TensorProto.INT32,
            TensorProto.INT64,
            TensorProto.UINT32,
            TensorProto.UINT64,
        }
    )

    ## TODO: Add support for conversion from STRING TO FLOAT
    ## Currently, such input will error out in parsing
    if convert_to in convert_to_int:
        builder.add_floor(
            name=node.name, input_name=node.inputs[0], output_name=node.outputs[0]
        )
    else:
        load_input_constants(builder, node, graph, err)
        builder.add_activation(
            name=node.name,
            non_linearity="LINEAR",
            input_name=node.inputs[0],
            output_name=node.outputs[0],
            params=[1.0, 0.0],
        ) 
Example #13
Source File: optimizer_test.py    From training_results_v0.6 with Apache License 2.0
def _make_fake_loop_op(self,
                           body_nodes,  # type: Sequence[NodeProto]
                           input_types,  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                           output_types  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                           ):  # type: (...) -> List[NodeProto]
        zero = helper.make_tensor("trip_count_value", TensorProto.INT32, (), [10])
        true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
        # lcd is a dummy loop-carried dependency that only exists because
        # right now the schema checker is broken and assumes a variadic
        # input needs at least one value.
        graph_inputs = [helper.make_tensor_value_info("i", TensorProto.INT32, ()),
                        helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
        for type, shape, name in input_types:
            graph_inputs.append(helper.make_tensor_value_info("_" + name, type, shape))
        graph_outputs = [helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
        for type, shape, name in output_types:
            graph_outputs.append(helper.make_tensor_value_info("_" + name, type, shape))
        body_graph = helper.make_graph(body_nodes, "body_graph", graph_inputs,
                                       graph_outputs)
        loop_inputs = ["trip_count", "condition"]
        loop_inputs.extend([name for _, _, name in input_types])
        # TODO: fix checker to accept 0-input variadic inputs
        if len(loop_inputs) == 2:
            loop_inputs.append("")
        loop_outputs = [name for _, _, name in output_types]
        retval_nodes = [
            helper.make_node("Constant", [], ["trip_count"], value=zero),
            helper.make_node("Constant", [], ["condition"], value=true),
            helper.make_node("Loop", loop_inputs, loop_outputs, body=body_graph)
        ]
        return retval_nodes 
Example #14
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_expand():

    def _test_expand(name, data, shape, ref_data):
        shape_array = np.array(shape)
        shape_node = onnx.helper.make_node('Constant',
                                    inputs=[],
                                    outputs=['shape'],
                                    value=onnx.helper.make_tensor(name = 'const_tensor',
                                                                  data_type = onnx.TensorProto.INT32,
                                                                  dims = shape_array.shape,
                                                                  vals = shape_array.flatten().astype('int32')))
        expand_node = helper.make_node("Expand", ["in", "shape"], ["out"])

        graph = helper.make_graph([shape_node, expand_node],
                                "expand_test",
                                inputs = [helper.make_tensor_value_info("in",
                                                TensorProto.FLOAT, list(data.shape))],
                                outputs = [helper.make_tensor_value_info("out",
                                                TensorProto.FLOAT, list(ref_data.shape))])

        model = helper.make_model(graph, producer_name=name)

        for target, ctx in ctx_list():
            tvm_out = get_tvm_output(model, data, target, ctx, ref_data.shape, 'float32')

        tvm.testing.assert_allclose(ref_data, tvm_out)

    in_shape = (3, 1)
    shape = (3, 4)
    data = np.random.uniform(size=in_shape).astype(np.float32)
    ref_data = np.tile(data, 4)
    _test_expand('expand_with_dim_unchanged_test', data, shape, ref_data)

    in_shape = (3, 1)
    shape = (2, 1, 6)
    data = np.random.uniform(size=in_shape).astype(np.float32)
    ref_data = data * np.ones(shape, dtype=np.float32)
    _test_expand('expand_with_dim_changed_test', data, shape, ref_data) 
Example #15
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_shape():
    in_shape = (4, 3, 3, 4)
    ref_shape = (6, 2, 4, 3)

    ref_array = np.array(ref_shape)
    ref_node = onnx.helper.make_node('Constant',
                                     inputs=[],
                                     outputs=['ref_in'],
                                     value=onnx.helper.make_tensor(name='const_tensor',
                                                                   data_type=onnx.TensorProto.INT32,
                                                                   dims=ref_array.shape,
                                                                   vals=ref_array.flatten().astype(int)))
    reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])

    shape_node = helper.make_node("Shape", ['out'], ['final_out'])

    graph = helper.make_graph([ref_node, reshape_node, shape_node],
                              "shape_test",
                              inputs=[helper.make_tensor_value_info("in",
                                                                    TensorProto.FLOAT, list(in_shape))],
                              outputs=[helper.make_tensor_value_info("final_out",
                                                                     TensorProto.FLOAT, list(ref_shape))])

    model = helper.make_model(graph, producer_name='shape_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'int32')

    tvm.testing.assert_allclose(ref_shape, tvm_out) 
Example #16
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_onehot():
    indices_shape = [10]
    indices_array = np.random.randint(
        low=0, high=9, size=indices_shape, dtype='int32')
    depth = 10
    values = np.asarray([0, 1])
    out_np = np.eye(depth)[indices_array.reshape(-1)]

    onehot_node = helper.make_node(
        "OneHot", ["indices", "depth", "values"], ["out"])

    graph = helper.make_graph([onehot_node],
                              "onehot_test",
                              inputs=[helper.make_tensor_value_info("indices",
                                                                    TensorProto.INT32, indices_shape),
                                      helper.make_tensor_value_info("depth",
                                                                    TensorProto.INT32, [1]),
                                      helper.make_tensor_value_info("values",
                                                                    TensorProto.INT32, values.shape)],
                              initializer=[helper.make_tensor("depth", TensorProto.INT32, [1], [depth]),
                                           helper.make_tensor("values", TensorProto.INT32, values.shape, values)],
                              outputs=[helper.make_tensor_value_info("out", TensorProto.INT32, out_np.shape)])

    model = helper.make_model(graph, producer_name="onehot_test")

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(
            model, [indices_array], target, ctx, out_np.shape)
        tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5) 
Example #17
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def verify_constantofshape(input_dim, value, dtype):
    out = np.empty(shape=input_dim, dtype=dtype)
    out.fill(value)

    fill_node = helper.make_node("ConstantOfShape", ["input"], ["output"],
                                 value=helper.make_tensor(
                                     'value',
                                     mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)],
                                     (1, ), (value, )))

    inputs = [
        helper.make_tensor_value_info("input", TensorProto.FLOAT, input_dim)
    ]

    graph = helper.make_graph(
        [fill_node],
        "fill_test",
        inputs,
        outputs=[
            helper.make_tensor_value_info("output", TensorProto.FLOAT,
                                          list(out.shape))
        ],
        initializer=[
            helper.make_tensor("input", TensorProto.INT32, (len(input_dim), ),
                               input_dim)
        ])

    model = helper.make_model(graph, producer_name='fill_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [], target, ctx, out.shape)

        tvm.testing.assert_allclose(out, tvm_out, rtol=1e-5, atol=1e-5) 
Example #18
Source File: shape_inference_test.py    From training_results_v0.6 with Apache License 2.0
def test_constant_fill(self):  # type: () -> None
        graph = self._make_graph(
            [],
            [make_node('ConstantFill', [], ['out'], dtype=TensorProto.INT32, shape=(3, 4, 5))],
            [])
        self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.INT32, (3, 4, 5))]) 
Example #19
Source File: test_forward.py    From training_results_v0.6 with Apache License 2.0
def test_reshape():
    in_shape = (4, 3, 3, 4)
    ref_shape = (3, 4, 4, 3)

    ref_array = np.array(ref_shape)
    ref_node = onnx.helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['ref_in'],
                                 value=onnx.helper.make_tensor(name = 'const_tensor',
                                                               data_type = onnx.TensorProto.INT32,
                                                               dims = ref_array.shape,
                                                               vals = ref_array.flatten().astype(int)))
    reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])

    graph = helper.make_graph([ref_node, reshape_node],
                              "reshape_test",
                              inputs = [helper.make_tensor_value_info("in",
                                            TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(ref_shape))])

    model = helper.make_model(graph, producer_name='reshape_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32')

    np.testing.assert_allclose(ref_shape, tvm_out.shape) 
Example #20
Source File: test_node.py    From onnx-tensorflow with Apache License 2.0
def test_constant_of_shape(self):
    if defs.onnx_opset_version() < 9:
      raise unittest.SkipTest(
          "ONNX version {} doesn't support ConstantOfShape.".format(
              defs.onnx_opset_version()))
    v = helper.make_tensor("value", TensorProto.FLOAT, [1], [1])
    node_def = helper.make_node("ConstantOfShape", ["X"], ["Y"], value=v)
    x = np.array([4, 3, 2])
    output = run_node(node_def, inputs=[x])
    np.testing.assert_almost_equal(output["Y"], np.ones(x, dtype=np.float32))
    v = helper.make_tensor("value", TensorProto.INT32, [1], [0])
    node_def = helper.make_node("ConstantOfShape", ["X"], ["Y"], value=v)
    x = np.array([10, 6])
    output = run_node(node_def, inputs=[x])
    np.testing.assert_almost_equal(output["Y"], np.zeros(x, dtype=np.int32)) 
Example #21
Source File: test_node.py    From onnx-tensorflow with Apache License 2.0
def test_cast(self):
    if legacy_onnx_pre_ver(1, 2) or legacy_opset_pre_ver(6):
      test_cases = [("FLOAT", tf.float32), ("UINT8", tf.uint8),
                    ("INT8", tf.int8),
                    ("UINT16", tf.uint16), ("INT16", tf.int16),
                    ("INT32", tf.int32), ("INT64", tf.int64), ("BOOL", tf.bool),
                    ("FLOAT16", tf.float16), ("DOUBLE", tf.float64),
                    ("COMPLEX64", tf.complex64), ("COMPLEX128", tf.complex128)]
    else:
      test_cases = [(TensorProto.FLOAT, tf.float32),
                    (TensorProto.UINT8, tf.uint8), (TensorProto.INT8, tf.int8),
                    (TensorProto.UINT16, tf.uint16),
                    (TensorProto.INT16, tf.int16),
                    (TensorProto.INT32, tf.int32),
                    (TensorProto.INT64, tf.int64), (TensorProto.BOOL, tf.bool),
                    (TensorProto.FLOAT16, tf.float16),
                    (TensorProto.DOUBLE, tf.float64),
                    (TensorProto.COMPLEX64, tf.complex64),
                    (TensorProto.COMPLEX128, tf.complex128)]
      if not legacy_opset_pre_ver(9):
        test_cases.append((TensorProto.STRING, tf.string))
    for ty, tf_type in test_cases:
      node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
      vector = [2, 3]
      output = run_node(node_def, [vector])
      np.testing.assert_equal(output["output"].dtype, tf_type)

    if not legacy_opset_pre_ver(9):
      test_cases2 = [(TensorProto.FLOAT, tf.float32),
                     (TensorProto.INT32, tf.int32),
                     (TensorProto.INT64, tf.int64),
                     (TensorProto.DOUBLE, tf.float64)]
      for ty, tf_type in test_cases2:
        node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
        vector = ['2', '3']
        output = run_node(node_def, [vector])
        np.testing.assert_equal(output["output"].dtype, tf_type) 
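A related note: the hand-written (TensorProto, tf dtype) pairs above have a numpy counterpart that ships with onnx itself, the same onnx.mapping module that Example #17 uses in the other direction. A minimal sketch (onnx.mapping is deprecated in newer onnx releases in favor of helper.tensor_dtype_to_np_dtype, so this assumes the onnx versions these tests target):

import numpy as np
from onnx import TensorProto, mapping

print(mapping.TENSOR_TYPE_TO_NP_TYPE[TensorProto.INT32])                       # int32
print(mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype("int32")] == TensorProto.INT32)  # True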
Example #22
Source File: _operators_nd.py    From onnx-coreml with MIT License
def _convert_cast(builder, node, graph, err):
    '''
    Perform the cast operation in CoreML.
        e.g. casting from float (assumed) to an integer type maps to a Floor layer;
             for other types, a copy (linear activation) layer is added.
    '''
    convert_to = node.attrs.get('to')
    convert_to_int = set({TensorProto.UINT8, TensorProto.INT8, TensorProto.UINT16, TensorProto.INT32,
                          TensorProto.INT64, TensorProto.UINT32, TensorProto.UINT64})

    ## TODO: Add support for conversion from STRING TO FLOAT
    ## Currently, such input will error out in parsing
    if convert_to in convert_to_int:
        builder.add_floor(
            name=node.name,
            input_name=node.inputs[0],
            output_name=node.outputs[0]
        )
    else:
        load_input_constants(builder, node, graph, err)
        builder.add_activation(
            name=node.name,
            non_linearity = 'LINEAR',
            input_name=node.inputs[0],
            output_name=node.outputs[0],
            params=[1.0, 0.0]
        ) 
Example #23
Source File: converter.py    From onnx-coreml with MIT License
def _transform_coreml_dtypes(builder, # type : NeuralNetworkBuilder
                             inputs, # type: List[EdgeInfo]
                             outputs # type: List[EdgeInfo]
                             ):
    # type: (...) -> None

    ''' Make sure ONNX input/output data types are mapped to the equivalent CoreML types
    '''
    for i, input_ in enumerate(inputs):
        onnx_type = input_[1]
        if onnx_type == TensorProto.FLOAT:
            _update_multiarray_to_float32(builder.spec.description.input[i])
        elif onnx_type == TensorProto.DOUBLE:
            continue
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(builder.spec.description.input[i])
        elif onnx_type == TensorProto.BOOL:
            _update_multiarray_to_float32(builder.spec.description.input[i])
        else:
            raise TypeError("Input must be of of type FLOAT, DOUBLE, INT32 or INT64")

    for i, output_ in enumerate(outputs):
        onnx_type = output_[1]
        if onnx_type == TensorProto.FLOAT:
            _update_multiarray_to_float32(builder.spec.description.output[i])
        elif onnx_type == TensorProto.DOUBLE:
            continue
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(builder.spec.description.output[i])
        elif onnx_type == TensorProto.BOOL:
            _update_multiarray_to_float32(builder.spec.description.output[i])
        else:
            raise TypeError("Output must be of of type FLOAT, DOUBLE, INT32 or INT64") 
Example #24
Source File: funcs.py    From chainer-compiler with MIT License
def call(self, env, x):
        return castto(x.to_tensor(env).name, TensorProto.INT32, env) 
Example #25
Source File: BertOnnxModel.py    From FARM with Apache License 2.0
def change_input_to_int32(self):
        original_opset_version = self.model.opset_import[0].version
        graph = self.graph()

        batch_size, sequence_length = self.get_bert_input_shape()
        new_graph_inputs = []

        bert_inputs = self.get_bert_inputs()
        for input in graph.input:
            if input.name in bert_inputs:
                self.remove_cast_int32(input.name)
                input_shape = [batch_size if isinstance(batch_size, int) else 1, sequence_length if isinstance(sequence_length, int) else 128]
                int32_input = onnx.helper.make_tensor_value_info(input.name, TensorProto.INT32, input_shape)
                new_graph_inputs.append(int32_input)
            else:
                new_graph_inputs.append(input)

        graph_def = onnx.helper.make_graph(graph.node,
                                           'int32 inputs',
                                           new_graph_inputs,
                                           graph.output,
                                           initializer=graph.initializer,
                                           value_info=graph.value_info)

        self.model = onnx.helper.make_model(graph_def, producer_name='bert model optimizer')

        if isinstance(batch_size, str) or isinstance(sequence_length, str):
            self.use_dynamic_axes(batch_size if isinstance(batch_size, str) else None, sequence_length if isinstance(sequence_length, str) else None)

        # restore opset version
        self.model.opset_import[0].version = original_opset_version 
Example #26
Source File: BertOnnxModel.py    From FARM with Apache License 2.0
def remove_cast_int32(self, input_name):
        input_name_to_nodes = self.input_name_to_nodes()
        nodes = input_name_to_nodes[input_name]
        for node in nodes:
            if node.op_type == "Cast":
                is_int32 = False
                for att in node.attribute:
                    if att.name == 'to' and att.i == int(TensorProto.INT32):
                        is_int32 = True
                        break
                if is_int32:
                    output_name = node.output[0]
                    self.remove_node(node)
                    self.replace_input_of_all_nodes(output_name, input_name) 
Example #27
Source File: BertOnnxModel.py    From FARM with Apache License 2.0
def cast_graph_input_to_int32(self, input_name):
        graph_input = self.find_graph_input(input_name)
        if graph_input is not None and graph_input.type.tensor_type.elem_type != TensorProto.INT32:
            cast_output, cast_node = self.cast_input_to_int32(input_name)
            logger.debug(f"Casted graph input {input_name} to int32")
            return True, cast_output

        logger.debug(f"Did not cast graph input {input_name} to int32: found {graph_input is not None}")
        return False, input_name 
Example #28
Source File: test_forward.py    From training_results_v0.6 with Apache License 2.0
def verify_argmax(input_dim, axis=None, keepdims=None):
    def _argmax_numpy(data, axis=0, keepdims=True):
        result = np.argmax(data, axis=axis)
        if (keepdims == 1):
            result = np.expand_dims(result, axis)
        return result.astype(data.dtype)

    a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)

    if keepdims is None and axis is None:
        b_np = _argmax_numpy(a_np1)
        node = onnx.helper.make_node('ArgMax',
                                     inputs=['a_np1'],
                                     outputs=['out'])
    elif axis is None:
        b_np = _argmax_numpy(a_np1, keepdims=keepdims)
        node = onnx.helper.make_node('ArgMax',
                                     inputs=['a_np1'],
                                     outputs=['out'],
                                     keepdims=keepdims)
    elif keepdims is None:
        b_np = _argmax_numpy(a_np1, axis=axis)
        node = onnx.helper.make_node('ArgMax',
                                     inputs=['a_np1'],
                                     outputs=['out'],
                                     axis=axis)
    else:
        b_np = _argmax_numpy(a_np1, axis=axis, keepdims=keepdims)
        node = onnx.helper.make_node('ArgMax',
                                     inputs=['a_np1'],
                                     outputs=['out'],
                                     axis=axis,
                                     keepdims=keepdims)

    graph = helper.make_graph([node],
                              "argmax_test",
                              inputs = [helper.make_tensor_value_info("a_np1",
                                            TensorProto.INT32, list(a_np1.shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.INT32, list(b_np.shape))])

    model = helper.make_model(graph, producer_name='argmax_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape, b_np.dtype)
        np.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) 
Example #29
Source File: test_forward.py    From training_results_v0.6 with Apache License 2.0
def verify_argmin(input_dim, axis=None, keepdims=None):
    def _argmin_numpy(data, axis=0, keepdims=True):
        result = np.argmin(data, axis=axis)
        if (keepdims == 1):
            result = np.expand_dims(result, axis)
        return result.astype(data.dtype)

    a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)
    if keepdims is None and axis is None:
        b_np = _argmin_numpy(a_np1)
        node = onnx.helper.make_node('ArgMin',
                                     inputs=['a_np1'],
                                     outputs=['out'])
    elif axis is None:
        b_np = _argmin_numpy(a_np1, keepdims=keepdims)
        node = onnx.helper.make_node('ArgMin',
                                     inputs=['a_np1'],
                                     outputs=['out'],
                                     keepdims=keepdims)
    elif keepdims is None:
        b_np = _argmin_numpy(a_np1, axis=axis)
        node = onnx.helper.make_node('ArgMin',
                                     inputs=['a_np1'],
                                     outputs=['out'],
                                     axis=axis)
    else:
        b_np = _argmin_numpy(a_np1, axis=axis, keepdims=keepdims)
        node = onnx.helper.make_node('ArgMin',
                                     inputs=['a_np1'],
                                     outputs=['out'],
                                     axis=axis,
                                     keepdims=keepdims)
    graph = helper.make_graph([node],
                              "argmin_test",
                              inputs = [helper.make_tensor_value_info("a_np1",
                                            TensorProto.INT32, list(a_np1.shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.INT32, list(b_np.shape))])

    model = helper.make_model(graph, producer_name='argmin_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape, b_np.dtype)
        np.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) 
Example #30
Source File: test_node.py    From onnx-tensorflow with Apache License 2.0
def test_constant(self):
    shape = [16, 16]
    values = np.random.randn(*shape).flatten().astype(float)
    const2_onnx = helper.make_tensor("const2", TensorProto.DOUBLE, shape,
                                     values)
    node_def = helper.make_node("Constant", [], ["Y"], value=const2_onnx)
    output = run_node(node_def, [])
    np.testing.assert_equal(output["Y"].shape, shape)
    np.testing.assert_almost_equal(output["Y"].flatten(), values)

    # test sparse tensor
    if not legacy_opset_pre_ver(11):
      expected = np.array([[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]])
      x = np.array([[0, 0], [1, 2]]).flatten().astype(np.int64)
      values = helper.make_tensor("values", TensorProto.INT32, [2], [1, 2])
      indices = helper.make_tensor("indices", TensorProto.INT64, [2, 2], x)
      a = helper.make_sparse_tensor(values, indices, [3, 4])
      node_def = helper.make_node("Constant", [], ["Y"], sparse_value=a)
      output = run_node(node_def, [])
      b = tf.sparse.SparseTensor(output["Y"].indices, output["Y"].values, output["Y"].dense_shape)
      b = tf.sparse.to_dense(b)
      result = b.numpy()
      np.testing.assert_equal(result, expected)

    if not legacy_opset_pre_ver(12):
      float_attr = 1.0
      floats_attr = [1.0, 2.0, 3.0]
      int_attr = np.int64(123)
      ints_attr = [np.int64(4), np.int64(5), np.int64(6)]
      string_attr = 'The Cat in the Hat'
      strings_attr = [
          'Green Eggs and Ham', 'How the Grinch Stole Christmas!',
          'The Cat in the Hat Comes Back'
      ]
      testcases = [(helper.make_node("Constant", [], ["Y"],
                                     value_float=float_attr), float_attr),
                   (helper.make_node("Constant", [], ["Y"],
                                     value_floats=floats_attr), floats_attr),
                   (helper.make_node("Constant", [], ["Y"],
                                     value_int=int_attr), int_attr),
                   (helper.make_node("Constant", [], ["Y"],
                                     value_ints=ints_attr), ints_attr),
                   (helper.make_node("Constant", [], ["Y"],
                                     value_string=string_attr), string_attr),
                   (helper.make_node("Constant", [], ["Y"],
                                     value_strings=strings_attr), strings_attr)]
      for node_def, expected in testcases:
        output = run_node(node_def, [])
        if isinstance(expected, str):
          np.testing.assert_string_equal(output["Y"].decode('UTF-8'), expected)
        elif isinstance(expected, list) and isinstance(expected[0], str):
          for i in range(len(expected)):
            np.testing.assert_string_equal(output['Y'][i].decode('UTF-8'),
                                           expected[i])
        else:
          np.testing.assert_equal(output["Y"], expected)