Python onnx.load_from_string() Examples
The following are 10 code examples of onnx.load_from_string(), taken from open source projects. Each example notes its source file, originating project, and license.
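Before the project examples, here is a minimal, self-contained sketch of what onnx.load_from_string() does: it deserializes a ModelProto from bytes, i.e. it is the inverse of SerializeToString(). The tiny one-node identity graph below is purely illustrative.

import onnx
from onnx import helper, TensorProto

# Build a trivial one-node model purely for illustration.
node = helper.make_node("Identity", inputs=["x"], outputs=["y"])
graph = helper.make_graph(
    [node], "tiny_graph",
    inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 3])],
    outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 3])],
)
model = helper.make_model(graph)

# Round-trip: serialize to bytes, then deserialize with load_from_string.
model_bytes = model.SerializeToString()
restored = onnx.load_from_string(model_bytes)
onnx.checker.check_model(restored)
assert restored == model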
Example #1
Source File: modelwrapper.py From finn with BSD 3-Clause "New" or "Revised" License
def __init__(self, onnx_model_proto, make_deepcopy=False):
    """Creates a ModelWrapper instance.
    onnx_model_proto can be either a ModelProto instance, or a string
    with the path to a stored .onnx file on disk, or serialized bytes.
    The make_deepcopy option controls whether a deep copy of the
    ModelProto is made internally.
    """
    if isinstance(onnx_model_proto, str):
        self._model_proto = onnx.load(onnx_model_proto)
    elif isinstance(onnx_model_proto, bytes):
        self._model_proto = onnx.load_from_string(onnx_model_proto)
    else:
        if make_deepcopy:
            self._model_proto = copy.deepcopy(onnx_model_proto)
        else:
            self._model_proto = onnx_model_proto
Example #2
Source File: pytorch2onnx.py From mmdetection with Apache License 2.0
def export_onnx_model(model, inputs, passes):
    """Trace and export a model to onnx format. Modified from
    https://github.com/facebookresearch/detectron2/

    Args:
        model (nn.Module):
        inputs (tuple[args]): the model will be called by `model(*inputs)`
        passes (None or list[str]): the optimization passes for the ONNX model

    Returns:
        an onnx model
    """
    assert isinstance(model, torch.nn.Module)

    # make sure all modules are in eval mode, onnx may change the training
    # state of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training

    model.apply(_check_eval)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                # verbose=True,  # NOTE: uncomment this for debugging
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())

    # Apply ONNX's Optimization
    if passes is not None:
        all_passes = optimizer.get_available_passes()
        assert all(p in all_passes for p in passes), \
            f'Only {all_passes} are supported'
        onnx_model = optimizer.optimize(onnx_model, passes)
    return onnx_model
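Examples #2 and #8 share the same pattern: torch.onnx.export writes the serialized model into an in-memory io.BytesIO buffer, and onnx.load_from_string turns those bytes into a ModelProto without ever touching disk. A minimal sketch of that pattern, using a stand-in torch.nn.Linear module (the module and shapes are illustrative, not from the examples above):

import io
import onnx
import torch

model = torch.nn.Linear(4, 2).eval()  # stand-in module, illustrative only
dummy_input = torch.randn(1, 4)

with torch.no_grad():
    f = io.BytesIO()
    torch.onnx.export(model, (dummy_input,), f)      # serialize into the buffer
    onnx_model = onnx.load_from_string(f.getvalue())  # bytes -> ModelProto

onnx.checker.check_model(onnx_model)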
Example #3
Source File: version_converter.py From training_results_v0.6 with Apache License 2.0
def convert_version(model, target_version):  # type: (ModelProto, int) -> ModelProto
    if not isinstance(model, ModelProto):
        raise ValueError('VersionConverter only accepts ModelProto as model, '
                         'incorrect type: {}'.format(type(model)))
    if not isinstance(target_version, int):
        raise ValueError('VersionConverter only accepts int as target_version, '
                         'incorrect type: {}'.format(type(target_version)))
    model_str = model.SerializeToString()
    converted_model_str = C.convert_version(model_str, target_version)
    return onnx.load_from_string(converted_model_str)
Example #4
Source File: shape_inference.py From training_results_v0.6 with Apache License 2.0
def infer_shapes(model):  # type: (ModelProto) -> ModelProto
    if not isinstance(model, ModelProto):
        raise ValueError('Shape inference only accepts ModelProto, '
                         'incorrect type: {}'.format(type(model)))
    model_str = model.SerializeToString()
    inferred_model_str = C.infer_shapes(model_str)
    return onnx.load_from_string(inferred_model_str)
Example #5
Source File: optimizer.py From training_results_v0.6 with Apache License 2.0
def optimize(model, passes=[]):  # type: (ModelProto, Sequence[Text]) -> ModelProto
    if len(passes) == 0:
        passes = ['eliminate_nop_transpose',
                  'fuse_consecutive_transposes',
                  'fuse_transpose_into_gemm']
    if not isinstance(model, ModelProto):
        raise ValueError('Optimizer only accepts ModelProto, '
                         'incorrect type: {}'.format(type(model)))
    model_str = model.SerializeToString()
    optimized_model_str = C.optimize(model_str, passes)
    return onnx.load_from_string(optimized_model_str)
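Examples #3 through #5 all follow the same round trip: the Python-side ModelProto is flattened with SerializeToString(), handed to the C++ backend (the C extension module), and the returned bytes are rebuilt into a fresh ModelProto with load_from_string(). A useful side effect is that the round trip by itself acts as a deep copy of the proto; a minimal sketch:

import onnx

def copy_model(model: onnx.ModelProto) -> onnx.ModelProto:
    # Serializing and re-parsing yields an independent ModelProto,
    # so mutations to the copy never touch the original.
    return onnx.load_from_string(model.SerializeToString())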
Example #6
Source File: parser.py From deep500 with BSD 3-Clause "New" or "Revised" License
def load_and_parse_binary(binary: bytes) -> OnnxModel:
    # Note: the parameter holds serialized bytes (the upstream annotation
    # said onnx.ModelProto, which does not match what load_from_string expects).
    model = onnx.load_from_string(binary)
    model = OnnxModel.create_from_onnx_model(model)
    model = clean_model(model)
    return model
Example #7
Source File: parser.py From deep500 with BSD 3-Clause "New" or "Revised" License
def load_model_only(path: str) -> onnx.ModelProto:
    """
    @param path path to file
    @return deserialized onnx model
    """
    with open(path, 'rb') as model_file:
        binary = model_file.read()
        model = onnx.load_from_string(binary)
    return model
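Example #7 is effectively a re-implementation of onnx.load(): reading the raw bytes from disk and handing them to load_from_string() yields the same ModelProto as loading the path directly (for models without external data). A quick equivalence check, with a hypothetical file name:

import onnx

m1 = onnx.load("model.onnx")  # hypothetical path
with open("model.onnx", "rb") as fh:
    m2 = onnx.load_from_string(fh.read())
assert m1 == m2  # protobuf messages compare by content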
Example #8
Source File: caffe2_export.py From detectron2 with Apache License 2.0
def export_onnx_model(model, inputs):
    """
    Trace and export a model to onnx format.

    Args:
        model (nn.Module):
        inputs (tuple[args]): the model will be called by `model(*inputs)`

    Returns:
        an onnx model
    """
    assert isinstance(model, torch.nn.Module)

    # make sure all modules are in eval mode, onnx may change the training state
    # of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training

    model.apply(_check_eval)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                # verbose=True,  # NOTE: uncomment this for debugging
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())

    # Apply ONNX's Optimization
    all_passes = onnx.optimizer.get_available_passes()
    passes = ["fuse_bn_into_conv"]
    assert all(p in all_passes for p in passes)
    onnx_model = onnx.optimizer.optimize(onnx_model, passes)
    return onnx_model
Example #9
Source File: tensorflow_export.py From fast-reid with Apache License 2.0
def _export_via_onnx(model, inputs):
    # from ipdb import set_trace; set_trace()  # debugging breakpoint left in the upstream source

    def _check_val(module):
        assert not module.training

    model.apply(_check_val)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                # verbose=True,  # NOTE: uncomment this for debugging
                export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())
    # torch.onnx.export(model,  # model being run
    #                   inputs,  # model input (or a tuple for multiple inputs)
    #                   "reid_test.onnx",  # where to save the model (can be a file or file-like object)
    #                   export_params=True,  # store the trained parameter weights inside the model file
    #                   opset_version=10,  # the ONNX version to export the model to
    #                   do_constant_folding=True,  # whether to execute constant folding for optimization
    #                   input_names=['input'],  # the model's input names
    #                   output_names=['output'],  # the model's output names
    #                   dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
    #                                 'output': {0: 'batch_size'}})

    # Apply ONNX's Optimization
    # all_passes = optimizer.get_available_passes()
    # passes = ["fuse_bn_into_conv"]
    # assert all(p in all_passes for p in passes)
    # onnx_model = optimizer.optimize(onnx_model, passes)

    # Convert ONNX Model to Tensorflow Model
    tf_rep = prepare(onnx_model, strict=False)  # Import the ONNX model to Tensorflow
    print(tf_rep.inputs)  # Input nodes to the model
    print('-----')
    print(tf_rep.outputs)  # Output nodes from the model
    print('-----')
    # print(tf_rep.tensor_dict)  # All nodes in the model

    # install onnx-tensorflow from github, and use tf_rep = prepare(onnx_model, strict=False)
    # Reference: https://github.com/onnx/onnx-tensorflow/issues/167
    # tf_rep = prepare(onnx_model)  # without strict=False leads to KeyError: 'pyfunc_0'

    # debug: feed the same input to check onnx and tf agree
    # output_onnx_tf = tf_rep.run(to_numpy(img))
    # print('output_onnx_tf = {}'.format(output_onnx_tf))

    # onnx --> tf.graph.pb
    # tf_pb_path = 'reid_tf_graph.pb'
    # tf_rep.export_graph(tf_pb_path)
    return tf_rep
Example #10
Source File: convert_pytorch_test.py From onnx-fb-universe with MIT License
def convert_tests(testcases, sets=1):
    print("Collected {} test cases from PyTorch.".format(len(testcases)))
    failed = 0
    ops = set()
    for t in testcases:
        test_name = get_test_name(t)
        module = gen_module(t)
        try:
            input = gen_input(t)
            f = io.BytesIO()
            torch.onnx._export(module, input, f)
            onnx_model = onnx.load_from_string(f.getvalue())
            onnx.checker.check_model(onnx_model)
            onnx.helper.strip_doc_string(onnx_model)
            output_dir = os.path.join(test_onnx_common.pytorch_converted_dir, test_name)
            if os.path.exists(output_dir):
                shutil.rmtree(output_dir)
            os.makedirs(output_dir)
            with open(os.path.join(output_dir, "model.onnx"), "wb") as file:
                file.write(onnx_model.SerializeToString())
            for i in range(sets):
                output = module(input)
                data_dir = os.path.join(output_dir, "test_data_set_{}".format(i))
                os.makedirs(data_dir)
                for index, var in enumerate([input]):
                    tensor = numpy_helper.from_array(var.data.numpy())
                    with open(os.path.join(data_dir, "input_{}.pb".format(index)), "wb") as file:
                        file.write(tensor.SerializeToString())
                for index, var in enumerate([output]):
                    tensor = numpy_helper.from_array(var.data.numpy())
                    with open(os.path.join(data_dir, "output_{}.pb".format(index)), "wb") as file:
                        file.write(tensor.SerializeToString())
                input = gen_input(t)
        except:
            traceback.print_exc()
            failed += 1
    print("Collected {} test cases from PyTorch repo, failed to export {} cases.".format(
        len(testcases), failed))
    print("PyTorch converted cases are stored in {}.".format(test_onnx_common.pytorch_converted_dir))
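Example #10 relies on onnx.numpy_helper to serialize the test inputs and outputs as TensorProto files alongside the exported model. The conversion is a lossless round trip, as this small sketch shows:

import numpy as np
from onnx import numpy_helper

arr = np.arange(6, dtype=np.float32).reshape(2, 3)
tensor = numpy_helper.from_array(arr, name="x")  # ndarray -> TensorProto
restored = numpy_helper.to_array(tensor)         # TensorProto -> ndarray
assert np.array_equal(arr, restored)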