Python onnx.save_model() Examples

The following are 17 code examples of onnx.save_model(). You can go to the original project or source file by following the links above each example, or check out all available functions and classes of the onnx module.
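Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic workflow around onnx.save_model(): build a tiny graph with onnx.helper, check it, write it to disk, and load it back. The graph and file names are arbitrary.

import onnx
from onnx import helper, TensorProto

# Build a trivial one-node graph: output = Identity(input)
inp = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3])
out = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3])
node = helper.make_node('Identity', ['input'], ['output'])
graph = helper.make_graph([node], 'identity_graph', [inp], [out])
model = helper.make_model(graph)

# Validate, save to disk (file name is arbitrary), and read the model back
onnx.checker.check_model(model)
onnx.save_model(model, 'identity.onnx')
reloaded = onnx.load_model('identity.onnx')
assert reloaded == model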
Example #1
Source File: onnx_model_artifact.py    From BentoML with Apache License 2.0
def save(self, dst):
        if self._onnx_model_path:
            shutil.copyfile(
                self._onnx_model_path, self.spec._saved_model_file_path(dst)
            )
        elif self._model_proto:
            try:
                import onnx
            except ImportError:
                raise MissingDependencyException(
                    '"onnx" package is required for packing with OnnxModelArtifact'
                )
            onnx.save_model(self._model_proto, self.spec._saved_model_file_path(dst))
        else:
            raise InvalidArgument(
                'onnx.ModelProto or a model file path is required to pack an '
                'OnnxModelArtifact'
            ) 
Example #2
Source File: cli.py    From keras-onnx with MIT License
def main(input_file, output_file=None, opset=None, channel_first=None):
    """
    A command line interface for Keras model to ONNX converter.
    :param input_file: the original model file path; it can also be the folder name of a TF saved model
    :param output_file: the converted ONNX model file path (optional)
    :param opset: the target opset for the ONNX model.
    :param channel_first: the name(s) of the inputs that need to be transposed to NCHW
    :return:
    """

    if not os.path.exists(input_file):
        print("File or directory name '{}' is invalid!".format(input_file))
        return

    file_ext = os.path.splitext(input_file)
    if output_file is None:
        output_file = file_ext[0] + '.onnx'

    assert file_ext[-1] == '.h5', "Unknown file extension."
    kml = tf.keras.models.load_model(input_file)
    oxml = convert_keras(kml, kml.name, '', opset, channel_first)  # use the model's name as the ONNX graph name
    onnx.save_model(oxml, output_file) 
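As written, calling main('model.h5') with no output_file derives the output path from the input name, producing model.onnx alongside the source file; note that although the docstring mentions TF saved-model folders, the assertion only accepts .h5 inputs.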
Example #3
Source File: onnx.py    From mlflow with Apache License 2.0
def get_default_conda_env():
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    import onnx
    import onnxruntime
    return _mlflow_conda_env(
        additional_conda_deps=None,
        additional_pip_deps=[
            "onnx=={}".format(onnx.__version__),
            # The ONNX pyfunc representation requires the OnnxRuntime
            # inference engine. Therefore, the conda environment must
            # include OnnxRuntime
            "onnxruntime=={}".format(onnxruntime.__version__),
        ],
        additional_conda_channels=None,
    ) 
Example #4
Source File: test_opt.py    From onnxconverter-common with MIT License
def test_merge_common(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)),
                              name="0")]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'], name="1")]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'], name="2")]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'], name="3")]
        nodes[4:] = [helper.make_node('LeakyRelu', ['max0'], ['leak0'], name="4")]
        nodes[5:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak1'], name="5")]
        nodes[6:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak2'], name="6")]
        nodes[7:] = [helper.make_node('Cast', ['leak1'], ['cast0'], to=6, name="7")]
        nodes[8:] = [helper.make_node('Cast', ['cast0'], ['cast1'], to=1, name="8")]
        nodes[9:] = [helper.make_node('Cast', ['leak2'], ['cast2'], to=6, name="9")]
        nodes[10:] = [helper.make_node('Cast', ['cast2'], ['cast3'], to=7, name="10")]
        nodes[11:] = [helper.make_node('Cast', ['cast3'], ['cast4'], to=1, name="11")]
        nodes[12:] = [helper.make_node('Add', ['cast1', 'cast4'], ['add0'], name="12")]
        nodes[13:] = [helper.make_node('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2], name="13")]
        nodes[14:] = [helper.make_node('Conv', ['tranpose2'], ['output0'], name="14")]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertEqual(len(new_nodes), 11)
        self.assertIsNotNone(model) 
Example #5
Source File: test_opt.py    From onnxconverter-common with MIT License
def test_fan_in(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)),
                              name="0")]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'], name="1")]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'], name="2")]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'], name="3")]
        nodes[4:] = [helper.make_node('LeakyRelu', ['max0'], ['leak0'], name="4")]
        nodes[5:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak1'], name="5")]
        nodes[6:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak2'], name="6")]
        nodes[7:] = [helper.make_node('Transpose', ['leak1'], ['tranpose0'], perm=[0, 2, 3, 1], name="7")]
        nodes[8:] = [helper.make_node('Transpose', ['leak2'], ['tranpose1'], perm=[0, 2, 3, 1], name="8")]
        nodes[9:] = [helper.make_node('Add', ['tranpose0', 'tranpose1'], ['add0'], name="9")]
        nodes[10:] = [helper.make_node('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2], name="10")]
        nodes[11:] = [helper.make_node('Conv', ['tranpose2'], ['output0'], name="11")]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertEqual(len(new_nodes), 6)
        self.assertIsNotNone(model) 
Example #6
Source File: test_opt.py    From onnxconverter-common with MIT License
def test_merge(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)))]
        nodes[1:] = [helper.make_node('Max', ['input1'], ['max0'])]
        nodes[2:] = [helper.make_node('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])]
        nodes[3:] = [helper.make_node('Transpose', ['tranpose0'], ['add_input1'], perm=(0, 3, 1, 2))]
        nodes[4:] = [helper.make_node('Add', ['max0', 'add_input1'], ['output0'])]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertEqual(len(new_nodes), 3)
        self.assertIsNotNone(model) 
Example #7
Source File: test_opt.py    From onnxconverter-common with MIT License
def test_move_transpose(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)))]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'])]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'])]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'])]
        nodes[4:] = [helper.make_node('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])]
        nodes[5:] = [helper.make_node('LeakyRelu', ['tranpose0'], ['tranpose1'])]
        nodes[6:] = [helper.make_node('Relu', ['tranpose1'], ['output0'], perm=(0, 3, 1, 2))]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        self.assertEqual(len(new_nodes), 5)
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertIsNotNone(model) 
Example #8
Source File: test_opt.py    From onnxconverter-common with MIT License
def test_optimizer(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)))]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'])]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'])]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'])]
        nodes[4:] = [helper.make_node('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])]
        nodes[5:] = [helper.make_node('Transpose', ['tranpose0'], ['tranpose1'], perm=(0, 3, 1, 2))]
        nodes[6:] = [helper.make_node('Relu', ['tranpose1'], ['output0'], perm=(0, 3, 1, 2))]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)
        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        self.assertEqual(len(new_nodes), 3)
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertIsNotNone(model) 
Example #9
Source File: onnx_fx.py    From onnxconverter-common with MIT License
def save(self, path):
        onnx.save_model(self.oxml, path) 
Example #10
Source File: decast.py    From onnxconverter-common with MIT License
def main():
    if len(sys.argv) < 4:
        print('decast.py model_in  model_out <op1, ...>')
        return

    input = sys.argv[1]
    output = sys.argv[2]
    op_list = sys.argv[3:]

    oxml = onnx.load_model(input)
    oxml = decast(oxml, op_list)
    onnx.save_model(oxml, output) 
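Run as a script, e.g. python decast.py model_in.onnx model_out.onnx Cast, every argument after the output path is collected into op_list and handed to decast() along with the loaded model.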
Example #11
Source File: keras_to_onnx.py    From keras-YOLOv3-model-set with MIT License
def onnx_convert(keras_model_file, output_file, op_set):
    custom_object_dict = get_custom_objects()
    model = load_model(keras_model_file, custom_objects=custom_object_dict)

    # convert to onnx model
    onnx_model = keras2onnx.convert_keras(model, model.name, custom_op_conversions=custom_object_dict, target_opset=op_set)

    # save converted onnx model
    onnx.save_model(onnx_model, output_file) 
Example #12
Source File: load_save_model.py    From caffe-onnx with MIT License
def saveonnxmodel(onnxmodel, onnx_save_path):
    try:
        onnx.checker.check_model(onnxmodel)
        onnx.save_model(onnxmodel, onnx_save_path)
        print("3. Model saved successfully to " + onnx_save_path)
    except Exception as e:
        print("3. There is a problem with the model, it was not saved:\n", e) 
Example #13
Source File: basic_test.py    From training_results_v0.6 with Apache License 2.0
def test_save_and_load_model(self):  # type: () -> None
        proto = self._simple_model()
        cls = ModelProto
        proto_string = onnx._serialize(proto)

        # Test if input is string
        loaded_proto = onnx.load_model_from_string(proto_string)
        self.assertTrue(proto == loaded_proto)

        # Test if input has a read function
        f = io.BytesIO()
        onnx.save_model(proto_string, f)
        f = io.BytesIO(f.getvalue())
        loaded_proto = onnx.load_model(f, cls)
        self.assertTrue(proto == loaded_proto)

        # Test if input is a file name
        try:
            fi = tempfile.NamedTemporaryFile(delete=False)
            onnx.save_model(proto, fi)
            fi.close()

            loaded_proto = onnx.load_model(fi.name, cls)
            self.assertTrue(proto == loaded_proto)
        finally:
            os.remove(fi.name) 
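This test covers the accepted inputs and destinations of onnx.save_model(): the first argument may be a ModelProto or its already-serialized bytes (proto_string), and the destination may be a writable file-like object (io.BytesIO or a NamedTemporaryFile here); onnx.load_model() likewise accepts either a file object or a file path (fi.name).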
Example #14
Source File: test_utils.py    From keras-onnx with MIT License
def run_onnx_runtime(case_name, onnx_model, data, expected, model_files, rtol=1.e-3, atol=1.e-6):
    if not os.path.exists(tmp_path):
        os.mkdir(tmp_path)
    temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
    onnx.save_model(onnx_model, temp_model_file)
    try:
        import onnxruntime
        sess = onnxruntime.InferenceSession(temp_model_file)
    except ImportError:
        keras2onnx.common.k2o_logger().warning("Cannot import ONNXRuntime!")
        return True

    if isinstance(data, dict):
        feed_input = data
    else:
        data = data if isinstance(data, list) else [data]
        input_names = sess.get_inputs()
        # To keep the test code simple, we require the input names in Keras test cases
        # to be in alphabetical order. This always holds unless something specifically prevents it.
        feed = zip(sorted(i_.name for i_ in input_names), data)
        feed_input = dict(feed)
    actual = sess.run(None, feed_input)

    if expected is None:
        return

    if isinstance(expected, tuple):
        expected = list(expected)
    elif not isinstance(expected, list):
        expected = [expected]

    res = all(np.allclose(expected[n_], actual[n_], rtol=rtol, atol=atol) for n_ in range(len(expected)))

    if res and temp_model_file not in model_files:  # failed case files are intentionally kept for diagnosis.
        model_files.append(temp_model_file)

    if not res:
        for n_ in range(len(expected)):
            expected_list = expected[n_].flatten()
            actual_list = actual[n_].flatten()
            print_mismatches(case_name, n_, expected_list, actual_list, rtol, atol)

    return res 
Example #15
Source File: test_yolov3.py    From keras-onnx with MIT License
def test_yolov3(self):
        img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
        yolo3_yolo3_dir = os.path.join(os.path.dirname(__file__), '../../keras-yolo3/yolo3')
        try:
            import onnxruntime
        except ImportError:
            return True

        from PIL import Image

        for is_tiny_yolo in [True, False]:
            if is_tiny_yolo:
                if not os.path.exists(tiny_model_file_name):
                    urllib.request.urlretrieve(YOLOV3_TINY_WEIGHTS_PATH, tiny_model_file_name)
                yolo_weights = load_model(tiny_model_file_name)
                model_path = tiny_model_file_name  # model path or trained weights path
                anchors_path = 'model_data/tiny_yolo_anchors.txt'
                case_name = 'yolov3-tiny'
            else:
                if not os.path.exists(model_file_name):
                    urllib.request.urlretrieve(YOLOV3_WEIGHTS_PATH, model_file_name)
                yolo_weights = load_model(model_file_name)
                model_path = model_file_name  # model path or trained weights path
                anchors_path = 'model_data/yolo_anchors.txt'
                case_name = 'yolov3'

            my_yolo = YOLO(model_path, anchors_path, yolo3_yolo3_dir)
            my_yolo.load_model(yolo_weights)
            onnx_model = convert_model(my_yolo, is_tiny_yolo)

            if not os.path.exists(tmp_path):
                os.mkdir(tmp_path)
            temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
            onnx.save_model(onnx_model, temp_model_file)

            sess = onnxruntime.InferenceSession(temp_model_file)

            image = Image.open(img_path)
            image_data = my_yolo.prepare_keras_data(image)

            all_boxes_k, all_scores_k, indices_k = my_yolo.final_model.predict([image_data, np.array([image.size[1], image.size[0]], dtype='float32').reshape(1, 2)])

            image_data_onnx = np.transpose(image_data, [0, 3, 1, 2])

            feed_f = dict(zip(['input_1', 'image_shape'],
                              (image_data_onnx, np.array([image.size[1], image.size[0]], dtype='float32').reshape(1, 2))))
            all_boxes, all_scores, indices = sess.run(None, input_feed=feed_f)

            expected = self.post_compute(all_boxes_k, all_scores_k, indices_k)
            actual = self.post_compute(all_boxes, all_scores, indices)

            res = all(np.allclose(expected[n_], actual[n_]) for n_ in range(3))
            self.assertTrue(res) 
Example #16
Source File: test_mask_rcnn.py    From keras-onnx with MIT License
def test_mask_rcnn(self):
        set_converter('CropAndResize', convert_tf_crop_and_resize)
        onnx_model = keras2onnx.convert_keras(model.keras_model)

        import skimage
        img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
        image = skimage.io.imread(img_path)
        images = [image]
        case_name = 'mask_rcnn'

        if not os.path.exists(tmp_path):
            os.mkdir(tmp_path)
        temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
        onnx.save_model(onnx_model, temp_model_file)
        try:
            import onnxruntime
            sess = onnxruntime.InferenceSession(temp_model_file)
        except ImportError:
            return True

        # preprocessing
        molded_images, image_metas, windows = model.mold_inputs(images)
        anchors = model.get_anchors(molded_images[0].shape)
        anchors = np.broadcast_to(anchors, (model.config.BATCH_SIZE,) + anchors.shape)

        expected = model.keras_model.predict(
            [molded_images.astype(np.float32), image_metas.astype(np.float32), anchors])

        actual = \
            sess.run(None, {"input_image": molded_images.astype(np.float32),
                            "input_anchors": anchors,
                            "input_image_meta": image_metas.astype(np.float32)})

        rtol = 1.e-3
        atol = 1.e-6
        compare_idx = [0, 3]
        res = all(np.allclose(expected[n_], actual[n_], rtol=rtol, atol=atol) for n_ in compare_idx)
        if res and temp_model_file not in self.model_files:  # failed case files are intentionally kept for diagnosis.
            self.model_files.append(temp_model_file)
        if not res:
            for n_ in compare_idx:
                expected_list = expected[n_].flatten()
                actual_list = actual[n_].flatten()
                print_mismatches(case_name, n_, expected_list, actual_list, rtol, atol)

        self.assertTrue(res) 
Example #17
Source File: custom_layers_test.py    From onnx-coreml with MIT License
def test_node_name_type_custom_functions(self):  # type: () -> None
    def convert_acos(builder, node, graph, err):
      params = NeuralNetwork_pb2.CustomLayerParams()
      params.className = node.op_type
      params.description = "Custom layer that corresponds to the ONNX op {}".format(node.op_type, )

      builder.add_custom(
        name=node.name,
        input_names=node.inputs,
        output_names=node.outputs,
        custom_proto_spec=params
      )

    def convert_topk_generic(builder, node, graph, err):
      params = NeuralNetwork_pb2.CustomLayerParams()
      params.className = node.op_type
      params.description = "Custom layer that corresponds to the ONNX op {}".format(node.op_type, )
      params.parameters["axis"].intValue = node.attrs.get('axis', -1)
      params.parameters["k"].intValue = node.attrs['k']

      builder.add_custom(
        name=node.name,
        input_names=node.inputs,
        output_names=node.outputs,
        custom_proto_spec=params
      )

    def convert_topk_node_specific(builder, node, graph, err):
      params = NeuralNetwork_pb2.CustomLayerParams()
      params.className = node.op_type
      params.description = "Custom layer that corresponds to the ONNX op {}".format(node.op_type, )
      params.parameters["axis"].intValue = node.attrs.get('axis', -1)

      builder.add_custom(
        name=node.name,
        input_names=node.inputs,
        output_names=node.outputs,
        custom_proto_spec=params
      )

    onnx_model = _make_model_acos_exp_topk()
    onnx.save_model(onnx_model, 'acos.onnx')
    coreml_model = convert(model=onnx_model,
                           add_custom_layers=True,
                           custom_conversion_functions={'Acos':convert_acos, 'TopK':convert_topk_generic,
                                                        'output_values_output_indices':convert_topk_node_specific})

    spec = coreml_model.get_spec()
    layers = spec.neuralNetwork.layers
    self.assertIsNotNone(layers[0].custom)
    self.assertIsNotNone(layers[2].custom)
    self.assertEqual('Acos', layers[0].custom.className)
    self.assertEqual('TopK', layers[2].custom.className)
    self.assertEqual(0, layers[2].custom.parameters['axis'].intValue)