Python coremltools.models.neural_network.NeuralNetworkBuilder() Examples
The following are 30 code examples of coremltools.models.neural_network.NeuralNetworkBuilder().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module coremltools.models.neural_network, or try the search function.
Example #1
Source File: test_model.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_nn_classifier_util(self):
    """Wrap a tiny linear network as an NN classifier and verify its prediction."""
    in_feats = [("data", datatypes.Array(3,))]
    out_feats = [("out", datatypes.Array(3,))]
    nn = NeuralNetworkBuilder(in_feats, out_feats, disable_rank5_shape_mapping=True)
    nn.add_activation("linear", "LINEAR", "data", "out")
    model = MLModel(nn.spec)
    model = make_nn_classifier(
        model,
        class_labels=["a", "b", "c"],
        predicted_feature_name="out_confidence",
        predicted_probabilities_output="out",
    )
    prediction = model.predict({"data": np.array([4.0, 5.5, 6.0])}, useCPUOnly=True)
    # The largest activation is at index 2, so label "c" must be predicted.
    self.assertEqual(prediction["out_confidence"], "c")
    self.assertEqual(model.get_spec().WhichOneof("Type"), "neuralNetworkClassifier")
Example #2
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def verify_MultiplyLayerParams(input_dim, alpha):
    """Compare a CoreML MULTIPLY elementwise layer (compiled via TVM) with numpy."""
    dtype = 'float32'
    lhs = np.random.uniform(size=input_dim).astype(dtype)
    rhs = np.random.uniform(size=input_dim).astype(dtype)
    expected = np.multiply(lhs, rhs) * alpha
    inputs = [('input1', datatypes.Array(*input_dim)),
              ('input2', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Mul', alpha=alpha,
                            input_names=['input1', 'input2'],
                            output_name='output', mode='MULTIPLY')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, [lhs, rhs], ['input1', 'input2'],
                               expected.shape, dtype)
        np.testing.assert_allclose(result, expected, rtol=1e-5)
Example #3
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def verify_UpsampleLayerParams(input_dim, scale, mode):
    """Compare a CoreML Upsample layer ('NN' or bilinear) with the topi reference."""
    dtype = "float32"
    data = np.full(input_dim, 1, dtype=dtype)
    if mode == 'NN':
        expected = topi.testing.upsampling_python(data, scale)
    else:
        # Bilinear path: compute the target spatial size explicitly (NCHW layout).
        new_h = input_dim[2] * scale
        new_w = input_dim[3] * scale
        expected = topi.testing.bilinear_resize_python(data, (new_h, new_w), 'NCHW')
    input = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(input, output)
    builder.add_upsample(name='Upsample',
                         scaling_factor_h=scale,
                         scaling_factor_w=scale,
                         mode=mode,
                         input_name='input',
                         output_name='output')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, data, 'input', expected.shape, dtype)
        np.testing.assert_allclose(result, expected, rtol=1e-5)
Example #4
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def verify_ConcatLayerParams(input1_dim, input2_dim):
    """Compare a CoreML CONCAT elementwise layer with numpy concatenation on axis 1."""
    dtype = 'float32'
    first = np.random.uniform(size=input1_dim).astype(dtype)
    second = np.random.uniform(size=input2_dim).astype(dtype)
    expected = np.concatenate((first, second), axis=1)
    inputs = [('input1', datatypes.Array(*input1_dim)),
              ('input2', datatypes.Array(*input2_dim))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Concate',
                            input_names=['input1', 'input2'],
                            output_name='output', mode='CONCAT')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, [first, second], ['input1', 'input2'],
                               expected.shape, dtype)
        np.testing.assert_allclose(result, expected, rtol=1e-5)
Example #5
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def verify_average(input_dim1, input_dim2, axis=0):
    """Compare a CoreML AVE elementwise layer with numpy's mean over two inputs."""
    dtype = 'float32'
    first = np.random.uniform(size=input_dim1).astype(dtype)
    second = np.random.uniform(size=input_dim2).astype(dtype)
    expected = np.mean((first, second), axis=axis)
    inputs = [('input1', datatypes.Array(*input_dim1)),
              ('input2', datatypes.Array(*input_dim2))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='MEAN',
                            input_names=['input1', 'input2'],
                            output_name='output', mode='AVE')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, [first, second], ['input1', 'input2'],
                               expected.shape, dtype)
        np.testing.assert_allclose(result, expected, rtol=1e-5)
Example #6
Source File: test_mlmodel_passes.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_load_constant_remove(self):
    """Disconnected load_constant_nd layers should be dropped by the pass."""
    in_feats = [("data", datatypes.Array(*(3, 4)))]
    out_feats = [("out", None)]
    builder = neural_network.NeuralNetworkBuilder(
        in_feats, out_feats, disable_rank5_shape_mapping=True
    )
    # Two connected activations plus three constants nothing consumes.
    builder.add_activation("relu1", "RELU", "data", "relu1")
    builder.add_load_constant_nd("const1", "c1", constant_value=np.ones((5,)), shape=(5,))
    builder.add_activation("relu2", "RELU", "relu1", "out")
    builder.add_load_constant_nd("const2", "c2", constant_value=np.ones((5,)), shape=(5,))
    builder.add_load_constant_nd("const3", "c3", constant_value=np.ones((5,)), shape=(5,))
    spec = builder.spec
    np.testing.assert_equal(5, len(spec.neuralNetwork.layers))
    remove_disconnected_layers(spec)
    # Only the two RELU layers remain after pruning.
    np.testing.assert_equal(2, len(spec.neuralNetwork.layers))
Example #7
Source File: test_mlmodel_passes.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_remove_single_identity_transpose(self):
    """An identity permutation transpose (axes 0,1,2) should be removed by the pass."""
    input_shape = (1, 10, 5)
    in_feats = [("data", datatypes.Array(*input_shape))]
    out_feats = [("out", None)]
    builder = neural_network.NeuralNetworkBuilder(
        in_feats, out_feats, disable_rank5_shape_mapping=True
    )
    builder.add_transpose(
        name="uselss_transpose",
        axes=[0, 1, 2],
        input_name="data",
        output_name="useless_transpose_out",
    )
    builder.add_activation(
        name="relu",
        non_linearity="RELU",
        input_name="useless_transpose_out",
        output_name="out",
    )
    # Expect exactly one layer (the RELU) to survive.
    self._test_builder(builder, input_shape, 1)
Example #8
Source File: test_mlmodel_passes.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_output_edge_case(self):
    """For safety, nodes producing a network output shouldn't be merged away."""
    input_shape = (1, 10, 5)
    in_feats = [("data", datatypes.Array(*input_shape))]
    out_feats = [("out", None)]
    builder = neural_network.NeuralNetworkBuilder(
        in_feats, out_feats, disable_rank5_shape_mapping=True
    )
    builder.add_transpose(
        name="first_transpose",
        axes=[2, 0, 1],
        input_name="data",
        output_name="first_transpose_out",
    )
    # The second transpose feeds the model output directly.
    builder.add_transpose(
        name="second_transpose",
        axes=[1, 2, 0],
        input_name="first_transpose_out",
        output_name="out",
    )
    # Both transposes are expected to remain.
    self._test_builder(builder, input_shape, 2)
Example #9
Source File: test_mlmodel_passes.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_remove_three_transpose(self):
    """Three chained transposes whose composition is the identity can all be removed."""
    input_shape = (1, 10, 5)
    in_feats = [("data", datatypes.Array(*input_shape))]
    out_feats = [("out", None)]
    builder = neural_network.NeuralNetworkBuilder(
        in_feats, out_feats, disable_rank5_shape_mapping=True
    )
    permutations = [[2, 1, 0], [1, 0, 2], [2, 0, 1]]
    previous = "data"
    for idx, axes in enumerate(permutations):
        layer_name = "transpose_" + str(idx)
        out_name = layer_name + "_out"
        builder.add_transpose(
            name=layer_name, axes=axes, input_name=previous, output_name=out_name
        )
        previous = out_name
    builder.add_activation(
        name="relu", non_linearity="RELU", input_name=previous, output_name="out"
    )
    # Only the RELU should remain after the pass.
    self._test_builder(builder, input_shape, 1)
Example #10
Source File: test_mlmodel_passes.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_load_constant_remove(self):
    """remove_disconnected_layers should prune constants no layer consumes."""
    builder = neural_network.NeuralNetworkBuilder(
        [("data", datatypes.Array(*(3, 4)))],
        [("out", None)],
        disable_rank5_shape_mapping=True,
    )
    builder.add_activation("relu1", "RELU", "data", "relu1")
    builder.add_load_constant_nd(
        "const1", "c1", constant_value=np.ones((5,)), shape=(5,)
    )
    builder.add_activation("relu2", "RELU", "relu1", "out")
    builder.add_load_constant_nd(
        "const2", "c2", constant_value=np.ones((5,)), shape=(5,)
    )
    builder.add_load_constant_nd(
        "const3", "c3", constant_value=np.ones((5,)), shape=(5,)
    )
    spec = builder.spec
    # Five layers before the pass, two (the RELUs) after.
    np.testing.assert_equal(5, len(spec.neuralNetwork.layers))
    remove_disconnected_layers(spec)
    np.testing.assert_equal(2, len(spec.neuralNetwork.layers))
Example #11
Source File: test_nn_builder.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_linear_quant_inner_product_3bit(self):
    """3-bit linearly-quantized inner product should dequantize and multiply correctly."""
    weights = np.reshape(np.arange(6), (2, 3)).astype(np.uint8)
    builder = NeuralNetworkBuilder(
        [("data", datatypes.Array(3))], [("probs", None)]
    )
    builder.add_inner_product(
        name="ip1",
        W=_convert_array_to_nbit_quantized_bytes(weights.flatten(), 3).tobytes(),
        b=None,
        input_channels=3,
        output_channels=2,
        has_bias=False,
        input_name="data",
        output_name="probs",
        quantization_type="linear",
        nbits=3,
        quant_scale=[11.0, 2.0],
        quant_bias=[-2.0, 10.0],
    )
    mlmodel = MLModel(builder.spec)
    result = mlmodel.predict({"data": np.array([1.0, 3.0, 5.0])})["probs"]
    # Hand-computed: rows dequantized with (scale, bias) per output channel.
    expected = np.array([125, 170])
    self.assertTrue(np.allclose(result.flatten(), expected.flatten()))
Example #12
Source File: test_nn_builder.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _build_nn_with_one_ip_layer(self):
    """Return a builder holding a single (bias-free) inner-product layer.

    NOTE(review): the layer is wired "input" -> "hidden" while the declared
    features are "data" -> "out", leaving the layer disconnected — presumably
    intentional for the graph-pass tests, but confirm against callers.
    """
    builder = NeuralNetworkBuilder(
        [("data", datatypes.Array(3))],
        [("out", None)],
        disable_rank5_shape_mapping=True,
    )
    weights = np.random.uniform(-0.5, 0.5, (3, 3))
    builder.add_inner_product(
        name="ip1",
        W=weights,
        b=None,
        input_channels=3,
        output_channels=3,
        has_bias=False,
        input_name="input",
        output_name="hidden",
    )
    return builder
Example #13
Source File: test_nn_builder.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_undefined_shape_single_output(self):
    """A model whose output shape is left undefined (None) should still predict."""
    ones_weight = np.ones((3, 3))
    builder = NeuralNetworkBuilder(
        [("data", datatypes.Array(3))], [("probs", None)]
    )
    builder.add_inner_product(
        name="ip1",
        W=ones_weight,
        b=None,
        input_channels=3,
        output_channels=3,
        has_bias=False,
        input_name="data",
        output_name="probs",
    )
    mlmodel = MLModel(builder.spec)
    result = mlmodel.predict({"data": np.ones((3,))})["probs"]
    # All-ones weights times an all-ones vector gives 3 in every channel.
    self.assertTrue(np.allclose(result, np.ones(3) * 3))
Example #14
Source File: test_model.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_multiarray_type_convert_to_float(self):
    """convert_double_to_float_multiarray_type should switch DOUBLE I/O to FLOAT32."""
    builder = NeuralNetworkBuilder(
        [("data", datatypes.Array(2))], [("out", datatypes.Array(2))]
    )
    builder.add_ceil("ceil", "data", "out")
    spec = builder.spec
    # Freshly-built specs default both multiarray endpoints to DOUBLE.
    for feature in (spec.description.input[0], spec.description.output[0]):
        self.assertEqual(
            feature.type.multiArrayType.dataType,
            Model_pb2.ArrayFeatureType.DOUBLE,
        )
    convert_double_to_float_multiarray_type(spec)
    # After conversion both endpoints must report FLOAT32.
    for feature in (spec.description.input[0], spec.description.output[0]):
        self.assertEqual(
            feature.type.multiArrayType.dataType,
            Model_pb2.ArrayFeatureType.FLOAT32,
        )
Example #15
Source File: test_model.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_multiarray_to_image_input_util(self):
    """make_image_input should accept a PIL image and apply per-channel bias + scale."""
    H, W, C = 1, 1, 3
    builder = NeuralNetworkBuilder(
        [("data", datatypes.Array(C, H, W))],
        [("out", datatypes.Array(C, H, W))],
        disable_rank5_shape_mapping=True,
    )
    builder.add_activation("linear", "LINEAR", "data", "out")
    mlmodel = MLModel(builder.spec)
    mlmodel = make_image_input(
        mlmodel,
        "data",
        red_bias=-5,
        green_bias=-6,
        blue_bias=-2.5,
        scale=10.0,
        image_format="NCHW",
    )
    pixels = np.reshape(np.array([4, 2, 5], dtype=np.uint8), (H, W, C))
    pil_img = PIL.Image.fromarray(pixels)
    result = mlmodel.predict({"data": pil_img}, useCPUOnly=True)["out"]
    self.assertEqual(result.shape, (C, H, W))
    # 10*4-5, 10*2-6, 10*5-2.5 per channel.
    np.testing.assert_almost_equal(result.flatten(), [35.0, 14.0, 47.5])
Example #16
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def verify_AddLayerParams(input_dim, alpha=2):
    """Compare a CoreML ADD elementwise layer (with scalar alpha) against numpy."""
    dtype = 'float32'
    lhs = np.random.uniform(size=input_dim).astype(dtype)
    rhs = np.random.uniform(size=input_dim).astype(dtype)
    expected = np.add(lhs, rhs) + alpha
    inputs = [('input1', datatypes.Array(*input_dim)),
              ('input2', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Add', alpha=alpha,
                            input_names=['input1', 'input2'],
                            output_name='output', mode='ADD')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, [lhs, rhs], ['input1', 'input2'],
                               expected.shape, dtype)
        np.testing.assert_allclose(result, expected, rtol=1e-5)
Example #17
Source File: test_model.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_nn_classifier_util_file(self):
    """make_nn_classifier should also accept class labels from a text file."""
    builder = NeuralNetworkBuilder(
        [("data", datatypes.Array(3,))],
        [("out", datatypes.Array(3,))],
        disable_rank5_shape_mapping=True,
    )
    builder.add_activation("linear", "LINEAR", "data", "out")
    mlmodel = MLModel(builder.spec)
    labels = ["a", "b", "c"]
    with tempfile.NamedTemporaryFile(mode="w", suffix=".txt") as f:
        # One label per line, flushed so the converter can read the file.
        f.write("\n".join(labels))
        f.flush()
        mlmodel = make_nn_classifier(
            mlmodel,
            class_labels=f.name,
            predicted_feature_name="out_confidence",
            predicted_probabilities_output="out",
        )
    prediction = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])}, useCPUOnly=True)
    # Largest score at index 2 -> label "c".
    self.assertEqual(prediction["out_confidence"], "c")
    self.assertEqual(mlmodel.get_spec().WhichOneof("Type"), "neuralNetworkClassifier")
Example #18
Source File: vgsl.py From kraken with Apache License 2.0 | 6 votes |
def save_model(self, path: str):
    """
    Serializes the model into path.

    Args:
        path (str): Target destination
    """
    inputs = [('input', datatypes.Array(*self.input))]
    outputs = [('output', datatypes.Array(*self.output))]
    net_builder = NeuralNetworkBuilder(inputs, outputs)
    input = 'input'
    # Remember where the torch module lived so we can restore it afterwards.
    prev_device = next(next(self.nn.children()).parameters()).device
    try:
        # Serialization happens on CPU; each child layer appends itself to the
        # builder and returns the name of its output blob for chaining.
        for name, layer in self.nn.to('cpu').named_children():
            input = layer.serialize(name, input, net_builder)
        mlmodel = MLModel(net_builder.spec)
        mlmodel.short_description = 'kraken recognition model'
        mlmodel.user_defined_metadata['vgsl'] = '[' + ' '.join(self.named_spec) + ']'
        if self.codec:
            mlmodel.user_defined_metadata['codec'] = json.dumps(self.codec.c2l)
        if self.user_metadata:
            mlmodel.user_defined_metadata['kraken_meta'] = json.dumps(self.user_metadata)
        mlmodel.save(path)
    finally:
        # Move the network back to its original device even if saving failed.
        self.nn.to(prev_device)
Example #19
Source File: coreml.py From incubator-tvm with Apache License 2.0 | 6 votes |
def __init__(self, model_name, function):
    """Prepare a CoreML builder for converting the given Relay function."""
    import coremltools
    from coremltools.models.neural_network import NeuralNetworkBuilder
    ExprVisitor.__init__(self)
    self.model_name = model_name
    self.function = function
    self.out_map = {}
    self.model_inputs_ = []
    self.buf_idx_ = 0

    # Update inputs and outputs after we visit all the nodes.
    # Set dummy values for now.
    # TODO: support multiple outputs
    dummy = lambda: coremltools.models.datatypes.Array(1,)
    inputs = [('', dummy()) for _ in self.function.params]
    outputs = [('', dummy())]
    self.builder = NeuralNetworkBuilder(inputs, outputs,
                                        disable_rank5_shape_mapping=True)
Example #20
Source File: test_forward.py From incubator-tvm with Apache License 2.0 | 6 votes |
def verify_AddLayerParams(input_dim, alpha=2):
    """Check the TVM CoreML frontend's ADD elementwise layer against numpy."""
    dtype = 'float32'
    lhs = np.random.uniform(size=input_dim).astype(dtype)
    rhs = np.random.uniform(size=input_dim).astype(dtype)
    expected = np.add(lhs, rhs) + alpha
    inputs = [('input1', datatypes.Array(*input_dim)),
              ('input2', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Add', alpha=alpha,
                            input_names=['input1', 'input2'],
                            output_name='output', mode='ADD')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, target, ctx, [lhs, rhs],
                               ['input1', 'input2'], expected.shape, dtype)
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
Example #21
Source File: test_forward.py From incubator-tvm with Apache License 2.0 | 6 votes |
def verify_MultiplyLayerParams(input_dim, alpha):
    """Check the TVM CoreML frontend's MULTIPLY elementwise layer against numpy."""
    dtype = 'float32'
    lhs = np.random.uniform(size=input_dim).astype(dtype)
    rhs = np.random.uniform(size=input_dim).astype(dtype)
    expected = np.multiply(lhs, rhs) * alpha
    inputs = [('input1', datatypes.Array(*input_dim)),
              ('input2', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Mul', alpha=alpha,
                            input_names=['input1', 'input2'],
                            output_name='output', mode='MULTIPLY')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, target, ctx, [lhs, rhs],
                               ['input1', 'input2'], expected.shape, dtype)
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
Example #22
Source File: test_forward.py From incubator-tvm with Apache License 2.0 | 6 votes |
def verify_ConcatLayerParams(input1_dim, input2_dim):
    """Check the TVM CoreML frontend's CONCAT layer against numpy (axis 1)."""
    dtype = 'float32'
    first = np.random.uniform(size=input1_dim).astype(dtype)
    second = np.random.uniform(size=input2_dim).astype(dtype)
    expected = np.concatenate((first, second), axis=1)
    inputs = [('input1', datatypes.Array(*input1_dim)),
              ('input2', datatypes.Array(*input2_dim))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Concate',
                            input_names=['input1', 'input2'],
                            output_name='output', mode='CONCAT')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, target, ctx, [first, second],
                               ['input1', 'input2'], expected.shape, dtype)
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
Example #23
Source File: test_forward.py From incubator-tvm with Apache License 2.0 | 6 votes |
def verify_UpsampleLayerParams(input_dim, scale, mode):
    """Check the TVM CoreML frontend's Upsample layer ('NN' or bilinear)."""
    dtype = "float32"
    data = np.full(input_dim, 1, dtype=dtype)
    if mode == 'NN':
        expected = topi.testing.upsampling_python(data, (scale, scale))
    else:
        # Bilinear path: explicit output size in NCHW layout.
        new_h = input_dim[2] * scale
        new_w = input_dim[3] * scale
        expected = topi.testing.bilinear_resize_python(data, (new_h, new_w), 'NCHW')
    input = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(input, output)
    builder.add_upsample(name='Upsample',
                         scaling_factor_h=scale,
                         scaling_factor_w=scale,
                         mode=mode,
                         input_name='input',
                         output_name='output')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, target, ctx, data, 'input',
                               expected.shape, dtype)
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
Example #24
Source File: test_forward.py From incubator-tvm with Apache License 2.0 | 6 votes |
def verify_lrn(input_dim, size, bias, alpha, beta):
    """Check the TVM CoreML frontend's LRN layer against the topi reference."""
    dtype = "float32"
    axis = 1
    data = np.random.uniform(size=input_dim).astype(dtype)
    expected = topi.testing.lrn_python(data, size, axis, bias, alpha, beta)
    input = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(input, output)
    # CoreML calls the bias term "k" and the window size "local_size".
    builder.add_lrn(name='LRN',
                    input_name='input',
                    output_name='output',
                    alpha=alpha,
                    beta=beta,
                    k=bias,
                    local_size=size)
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, target, ctx, data, 'input',
                               expected.shape, dtype)
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
Example #25
Source File: test_forward.py From incubator-tvm with Apache License 2.0 | 6 votes |
def verify_average(input_dim1, input_dim2, axis=0):
    """Check the TVM CoreML frontend's AVE elementwise layer against numpy mean."""
    dtype = 'float32'
    first = np.random.uniform(size=input_dim1).astype(dtype)
    second = np.random.uniform(size=input_dim2).astype(dtype)
    expected = np.mean((first, second), axis=axis)
    inputs = [('input1', datatypes.Array(*input_dim1)),
              ('input2', datatypes.Array(*input_dim2))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='MEAN',
                            input_names=['input1', 'input2'],
                            output_name='output', mode='AVE')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, target, ctx, [first, second],
                               ['input1', 'input2'], expected.shape, dtype)
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
Example #26
Source File: test_forward.py From incubator-tvm with Apache License 2.0 | 6 votes |
def verify_max(input_dim):
    """Check the TVM CoreML frontend's MAX elementwise layer over three inputs."""
    dtype = 'float32'
    arrays = [np.random.uniform(size=input_dim).astype(dtype) for _ in range(3)]
    expected = np.max(tuple(arrays), axis=0)
    names = ['input1', 'input2', 'input3']
    inputs = [(n, datatypes.Array(*input_dim)) for n in names]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Max',
                            input_names=names,
                            output_name='output', mode='MAX')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, target, ctx, arrays, names,
                               expected.shape, dtype)
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
Example #27
Source File: test_forward.py From incubator-tvm with Apache License 2.0 | 6 votes |
def verify_min(input_dim):
    """Check the TVM CoreML frontend's MIN elementwise layer over three inputs."""
    dtype = 'float32'
    arrays = [np.random.uniform(size=input_dim).astype(dtype) for _ in range(3)]
    expected = np.min(tuple(arrays), axis=0)
    names = ['input1', 'input2', 'input3']
    inputs = [(n, datatypes.Array(*input_dim)) for n in names]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Min',
                            input_names=names,
                            output_name='output', mode='MIN')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        result = run_tvm_graph(model, target, ctx, arrays, names,
                               expected.shape, dtype)
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
Example #28
Source File: test_cml_AllNeuralNetworkConverters.py From onnxmltools with MIT License | 6 votes |
def test_unary_function_converter(self):
    """Chain every unary mode through the CoreML->ONNX converter and assert success."""
    input_dim = (3,)
    output_dim = (3,)
    input = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*output_dim))]
    builder = NeuralNetworkBuilder(input, output)
    # (name, input blob, output blob, mode, optional alpha) for each unary op.
    chain = [
        ('Unary1', 'input', 'mid1', 'abs', None),
        ('Unary2', 'mid1', 'mid2', 'sqrt', None),
        ('Unary3', 'mid2', 'mid3', 'rsqrt', None),
        ('Unary4', 'mid3', 'mid4', 'inverse', None),
        ('Unary5', 'mid4', 'mid5', 'power', 2),
        ('Unary6', 'mid5', 'mid6', 'exp', None),
        ('Unary7', 'mid6', 'mid7', 'log', None),
        ('Unary8', 'mid7', 'output', 'threshold', None),
    ]
    for layer, src, dst, mode, alpha in chain:
        if alpha is None:
            builder.add_unary(name=layer, input_name=src, output_name=dst, mode=mode)
        else:
            builder.add_unary(name=layer, input_name=src, output_name=dst,
                              mode=mode, alpha=alpha)
    model_onnx = convert_coreml(builder.spec)
    self.assertTrue(model_onnx is not None)
Example #29
Source File: test_cml_AllNeuralNetworkConverters.py From onnxmltools with MIT License | 6 votes |
def test_convolution_converter(self):
    """Convert a CoreML deconvolution layer to ONNX and assert the result exists."""
    input_dim = (1, 1, 4, 2)
    output_dim = (1, 1, 4, 2)
    input = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*output_dim))]
    builder = NeuralNetworkBuilder(input, output)
    # 2x2 kernel: top row +1, bottom row -1.
    weights = numpy.zeros(shape=(1, 1, 2, 2))
    weights[:] = [[1, 1], [-1, -1]]
    bias = numpy.zeros(shape=(1,))
    bias[:] = 100
    builder.add_convolution(name='Conv',
                            kernel_channels=1,
                            output_channels=1,
                            height=2,
                            width=2,
                            stride_height=1,
                            stride_width=1,
                            border_mode='same',
                            groups=1,
                            W=weights,
                            b=bias,
                            has_bias=True,
                            input_name='input',
                            output_name='output',
                            is_deconv=True,
                            output_shape=(1, 1, 4, 2))
    model_onnx = convert_coreml(builder.spec)
    self.assertTrue(model_onnx is not None)
Example #30
Source File: test_cml_AllNeuralNetworkConverters.py From onnxmltools with MIT License | 6 votes |
def test_batchnorm_converter(self):
    """Convert a CoreML batchnorm layer to ONNX and assert the result exists."""
    input_dim = (3,)
    output_dim = (3,)
    input = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*output_dim))]
    builder = NeuralNetworkBuilder(input, output)

    def _param(values):
        # Build a 3-element float parameter vector for the batchnorm layer.
        arr = numpy.ndarray(shape=(3,))
        arr[:] = values
        return arr

    builder.add_batchnorm(name='BatchNormalize',
                          channels=3,
                          gamma=_param([-1, 0, 1]),
                          beta=_param([10, 20, 30]),
                          mean=_param([0, 0, 0]),
                          variance=_param([1, 1, 1]),
                          input_name='input',
                          output_name='output')
    model_onnx = convert_coreml(builder.spec)
    self.assertTrue(model_onnx is not None)