Python onnx.save() Examples

The following are 8 code examples of onnx.save(), drawn from open-source projects; the header above each example names the source file and project it comes from. onnx.save() (an alias of onnx.save_model()) serializes an in-memory ModelProto to a file.
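As a minimal, self-contained sketch of the call (the one-node graph and file name here are illustrative, not taken from any example below):

import onnx
from onnx import helper, TensorProto

# Build a trivial single-node graph: Y = Identity(X).
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])
node = helper.make_node('Identity', ['X'], ['Y'])
graph = helper.make_graph([node], 'identity_graph', [X], [Y])
model = helper.make_model(graph)

onnx.checker.check_model(model)    # validate before serializing
onnx.save(model, 'identity.onnx')  # writes a binary protobuf
roundtrip = onnx.load('identity.onnx')
assert roundtrip.graph.name == 'identity_graph'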
Example #1
Source File: input_rewriter.py    From chainer-compiler with MIT License
def rewrite_onnx_file(model_filename, out_filename, new_input_types):
    xmodel = onnx.load(model_filename)
    xmodel = rewrite_onnx_model(xmodel, new_input_types)
    onnx.save(xmodel, out_filename)
    return xmodel 
Example #2
Source File: onnx_optimize.py    From gen-efficientnet-pytorch with Apache License 2.0
def main():
    args = parser.parse_args()
    onnx_model = onnx.load(args.model)
    num_original_nodes, original_graph_str = traverse_graph(onnx_model.graph)

    # Optimizer passes to perform
    passes = [
        #'eliminate_deadend',
        'eliminate_identity',
        'eliminate_nop_dropout',
        'eliminate_nop_pad',
        'eliminate_nop_transpose',
        'eliminate_unused_initializer',
        'extract_constant_to_initializer',
        'fuse_add_bias_into_conv',
        'fuse_bn_into_conv',
        'fuse_consecutive_concats',
        'fuse_consecutive_reduce_unsqueeze',
        'fuse_consecutive_squeezes',
        'fuse_consecutive_transposes',
        #'fuse_matmul_add_bias_into_gemm',
        'fuse_pad_into_conv',
        #'fuse_transpose_into_gemm',
        #'lift_lexical_references',
    ]

    # Apply the optimization on the original serialized model
    optimized_model = optimizer.optimize(onnx_model, passes)

    num_optimized_nodes, optimized_graph_str = traverse_graph(optimized_model.graph)
    print('==> The model after optimization:\n{}\n'.format(optimized_graph_str))
    print('==> The optimized model has {} nodes, the original had {}.'.format(num_optimized_nodes, num_original_nodes))

    # Save the ONNX model
    onnx.save(optimized_model, args.output) 
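Note that the `optimizer` module used above (`from onnx import optimizer`) was removed from the onnx package as of release 1.9 and now lives in the separate onnxoptimizer project. A rough modern equivalent, with placeholder file names:

import onnx
import onnxoptimizer  # pip install onnxoptimizer

model = onnx.load('model.onnx')
optimized = onnxoptimizer.optimize(model, ['eliminate_identity',
                                           'eliminate_nop_transpose'])
onnx.save(optimized, 'model_opt.onnx')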
Example #3
Source File: saver.py    From MMdnn with MIT License
def save_model(MainModel, network_filepath, weight_filepath, dump_filepath):
    model = MainModel.KitModel(weight_filepath)
    onnx.save(model, dump_filepath)
    print('ONNX model file is saved as [{}], generated by [{}.py] and [{}].'.format(
        dump_filepath, network_filepath, weight_filepath)) 
Example #4
Source File: modelwrapper.py    From finn with BSD 3-Clause "New" or "Revised" License
def save(self, filename):
    """Saves the wrapper ONNX ModelProto into a file with given name."""
    onnx.save(self._model_proto, filename)
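In recent onnx releases, onnx.save() also accepts external-data options, which matter for models whose initializers push the serialized file past protobuf's 2 GB limit. A sketch with placeholder file names:

import onnx

model = onnx.load('big_model.onnx')
# Store the weights next to the .onnx file instead of inside it:
onnx.save(model, 'big_model_ext.onnx',
          save_as_external_data=True,
          all_tensors_to_one_file=True,
          location='big_model_ext.data')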
Example #5
Source File: graph.py    From nni with MIT License
def graph_to_onnx(graph, onnx_model_path):
    import onnx
    # TODO: reimplement using the ONNX IR in the future
    onnx_out = graph.produce_onnx_model()
    onnx.save(onnx_out, onnx_model_path)
    return onnx_out 
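graph.produce_onnx_model() is NNI-internal, but however a ModelProto is produced, validating it before or after saving is cheap (mirroring the checker call in Example #7 below):

import onnx

model = onnx.load(onnx_model_path)  # assumes the path used above
onnx.checker.check_model(model)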
Example #6
Source File: pytorch2onnx.py    From mmdetection with Apache License 2.0
def main():
    args = parse_args()

    if not args.out.endswith('.onnx'):
        raise ValueError('The output file must be an .onnx file.')

    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3, ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    # Only support CPU mode for now
    model.cpu().eval()
    # Customized ops are not supported, use torchvision ops instead.
    for m in model.modules():
        if isinstance(m, (RoIPool, RoIAlign)):
            # set use_torchvision on-the-fly
            m.use_torchvision = True

    # TODO: a better way to override forward function
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        raise NotImplementedError(
            'ONNX conversion is not currently supported for '
            f'{model.__class__.__name__}')

    input_data = torch.empty((1, *input_shape),
                             dtype=next(model.parameters()).dtype,
                             device=next(model.parameters()).device)

    onnx_model = export_onnx_model(model, (input_data, ), args.passes)
    # Print a human-readable representation of the graph
    print(onnx.helper.printable_graph(onnx_model.graph))
    print(f'saving model in {args.out}')
    onnx.save(onnx_model, args.out) 
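export_onnx_model here is a project-specific helper; for reference, the common case is covered by the stock torch.onnx.export (the torchvision model and shapes below are illustrative):

import torch
import torchvision

model = torchvision.models.resnet18().eval()
dummy = torch.randn(1, 3, 224, 224)
torch.onnx.export(model, dummy, 'resnet18.onnx',
                  input_names=['input'], output_names=['output'],
                  opset_version=11)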
Example #7
Source File: yolov3_to_onnx.py    From iAI with MIT License
def main():
    """Run the DarkNet-to-ONNX conversion for YOLOv3-608."""
    # Have to use Python 2 due to hashlib compatibility
    if sys.version_info[0] > 2:
        raise Exception("This script is only compatible with Python 2; please re-run it "
                        "with python2. The rest of this sample can be run with either "
                        "version of Python.")

    # Download the config for YOLOv3 if not present yet, and analyze the checksum:
    cfg_file_path = download_file(
        'yolov3.cfg',
        'https://raw.githubusercontent.com/pjreddie/darknet/f86901f6177dfc6116360a13cc06ab680e0c86b0/cfg/yolov3.cfg',
        'b969a43a848bbf26901643b833cfb96c')

    # These are the only layers DarkNetParser will extract parameters from. The three layers of
    # type 'yolo' are not parsed in detail because they are included in the post-processing later:
    supported_layers = ['net', 'convolutional', 'shortcut',
                        'route', 'upsample']

    # Create a DarkNetParser object, and then use it to generate an OrderedDict with all
    # layers' configs from the cfg file:
    parser = DarkNetParser(supported_layers)
    layer_configs = parser.parse_cfg_file(cfg_file_path)
    # We do not need the parser anymore after we got layer_configs:
    del parser

    # Of the layer_configs above, there are three outputs whose shapes we need to
    # know (in CHW format):
    output_tensor_dims = OrderedDict()
    output_tensor_dims['082_convolutional'] = [255, 19, 19]
    output_tensor_dims['094_convolutional'] = [255, 38, 38]
    output_tensor_dims['106_convolutional'] = [255, 76, 76]

    # Create a GraphBuilderONNX object with the known output tensor dimensions:
    builder = GraphBuilderONNX(output_tensor_dims)

    # We will populate the network with weights later, so download them from
    # the official mirror (and verify the checksum):
    weights_file_path = download_file(
        'yolov3.weights',
        'https://pjreddie.com/media/files/yolov3.weights',
        'c84e5b99d0e52cd466ae710cadf6d84c')

    # Now generate an ONNX graph with weights from the previously parsed layer configurations
    # and the weights file:
    yolov3_model_def = builder.build_onnx_graph(
        layer_configs=layer_configs,
        weights_file_path=weights_file_path,
        verbose=True)
    # Once we have the model definition, we do not need the builder anymore:
    del builder

    # Perform a sanity check on the ONNX model definition:
    onnx.checker.check_model(yolov3_model_def)

    # Serialize the generated ONNX graph to this file:
    output_file_path = 'yolov3.onnx'
    onnx.save(yolov3_model_def, output_file_path) 
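download_file is defined elsewhere in this sample; the Python 2 pin above stems from its checksum handling. For reference, a streaming MD5 check along these lines (the helper name is ours) runs under either Python version:

import hashlib

def md5_matches(path, expected_md5):
    # Stream the file so the large weights file never sits in RAM all at once.
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
    return digest.hexdigest() == expected_md5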
Example #8
Source File: quantize_model.py    From chainer-compiler with MIT License
def main():
    parser = argparse.ArgumentParser(
        description='Quantize model with specified parameters')
    parser.add_argument('--no_per_channel', '-t',
                        action='store_true', default=False)
    parser.add_argument('--nbits', type=int, default=8)
    parser.add_argument('--quantization_mode', default='Integer',
                        choices=('Integer', 'QLinear'))
    parser.add_argument('--static', '-s', action='store_true', default=False)
    parser.add_argument('--asymmetric_input_types',
                        action='store_true', default=False)
    parser.add_argument('--input_quantization_params', default='')
    parser.add_argument('--output_quantization_params', default='')
    parser.add_argument('model')
    parser.add_argument('output')
    args = parser.parse_args()

    args.per_channel = not args.no_per_channel
    del args.no_per_channel

    if args.quantization_mode == 'QLinear':
        args.quantization_mode = quantize.QuantizationMode.QLinearOps
    else:
        args.quantization_mode = quantize.QuantizationMode.IntegerOps

    if len(args.input_quantization_params) != 0:
        args.input_quantization_params = json.loads(
            args.input_quantization_params)
    else:
        args.input_quantization_params = None

    if len(args.output_quantization_params) != 0:
        args.output_quantization_params = json.loads(
            args.output_quantization_params)
    else:
        args.output_quantization_params = None

    # Load the onnx model
    model_file = args.model
    model = onnx.load(model_file)
    del args.model

    output_file = args.output
    del args.output

    # Quantize
    print('Quantize config: {}'.format(vars(args)))
    quantized_model = quantize.quantize(model, **vars(args))

    print('Quantized "{}"; saving result to "{}"'.format(model_file, output_file))

    # Save the quantized model
    onnx.save(quantized_model, output_file)
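The quantize module above is presumably the early onnxruntime quantization API. Current onnxruntime releases expose this functionality as onnxruntime.quantization.quantize_dynamic, which writes the output file itself rather than returning a ModelProto for onnx.save(); a rough sketch with placeholder file names:

from onnxruntime.quantization import quantize_dynamic, QuantType

quantize_dynamic('model.onnx', 'model_int8.onnx',
                 weight_type=QuantType.QInt8)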