Python onnx.helper.make_model() Examples
The following are 30 code examples of onnx.helper.make_model().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module onnx.helper, or try the search function.
Example #1
Source File: onnx_import_test.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 7 votes |
def test_broadcast():
    """Check that ONNX Add broadcasts a (1, 5) operand against (1, 3, 4, 5)."""
    lhs = np.random.rand(1, 3, 4, 5).astype("float32")
    rhs = np.random.rand(1, 5).astype("float32")
    inputs = [
        helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
        helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5)),
    ]
    outputs = [
        helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
    ]
    add_node = helper.make_node("Add", ["input1", "input2"], ["output"])
    graph = helper.make_graph([add_node], "bcast_test", inputs, outputs)
    bcast_model = helper.make_model(graph)
    backend_rep = mxnet_backend.prepare(bcast_model)
    # numpy's own broadcasting rules give the reference result.
    expected = lhs + rhs
    actual = backend_rep.run([lhs, rhs])
    npt.assert_almost_equal(actual[0], expected)
Example #2
Source File: test_ops_matmul.py From ngraph-onnx with Apache License 2.0 | 7 votes |
def make_onnx_model_for_matmul_op(input_left, input_right):
    """Return an ONNX model containing a single MatMul node for the operands."""
    # Infer the result shape by actually multiplying the inputs with numpy.
    output_shape = np.matmul(input_left, input_right).shape
    matmul_node = make_node('MatMul', ['X', 'Y'], ['Z'], name='test_node')
    graph_inputs = [
        make_tensor_value_info('X', onnx.TensorProto.FLOAT, input_left.shape),
        make_tensor_value_info('Y', onnx.TensorProto.FLOAT, input_right.shape),
    ]
    graph_outputs = [make_tensor_value_info('Z', onnx.TensorProto.FLOAT, output_shape)]
    graph = make_graph([matmul_node], 'test_graph', graph_inputs, graph_outputs)
    return make_model(graph, producer_name='ngraph ONNXImporter')
Example #3
Source File: onnx_converters.py From chainer-compiler with MIT License | 6 votes |
def generate_model(self, inputs, outputs, graph, model) -> 'ModelProto':
    """Build an ONNX ModelProto from the traced graph and model parameters."""
    # Derive a stable parameter name from each parameter's path, keyed by
    # the parameter object's id so it can be looked up during emission.
    self.param2name = {
        id(param): 'param' + path.replace('/', '_')
        for path, param in model.namedparams()
    }
    # NOTE(review): assigned_names appears to be a module-level list used as
    # a name registry — confirm against the rest of the file.
    for name in self.param2name.values():
        assigned_names.append(name)
    # Assign ONNX-compatible names to all graph values before conversion.
    assign_onnx_name(graph)
    onnx_graph = self.generate_graph(inputs, outputs, graph, None, True)
    return oh.make_model(
        onnx_graph, producer_name="elichika", producer_version="0.1")
Example #4
Source File: test_ops_matmul.py From ngraph-onnx with Apache License 2.0 | 6 votes |
def make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs):
    """Return an ONNX model with one Gemm node; kwargs become node attributes."""
    # Apply transA/transB up front so the expected output shape can be inferred.
    a_eff = input_a.T if kwargs.get('transA') else input_a
    b_eff = input_b.T if kwargs.get('transB') else input_b
    output_shape = np.dot(a_eff, b_eff).shape
    gemm_node = make_node('Gemm', ['A', 'B', 'C'], ['Y'], name='test_node', **kwargs)
    graph = make_graph(
        [gemm_node], 'test_graph',
        [make_tensor_value_info('A', onnx.TensorProto.FLOAT, input_a.shape),
         make_tensor_value_info('B', onnx.TensorProto.FLOAT, input_b.shape),
         make_tensor_value_info('C', onnx.TensorProto.FLOAT, input_c.shape)],
        [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, output_shape)])
    return make_model(graph, producer_name='ngraph ONNXImporter')
Example #5
Source File: test_ops_unary.py From ngraph-onnx with Apache License 2.0 | 6 votes |
def test_identity():
    """Identity must pass data through, both standalone and inside a graph."""
    np.random.seed(133391)
    shape = [2, 4]
    input_data = np.random.randn(*shape).astype(np.float32)

    # Standalone node: output must equal input exactly.
    identity_node = make_node('Identity', inputs=['x'], outputs=['y'])
    ng_results = run_node(identity_node, [input_data])
    assert np.array_equal(ng_results, [input_data])

    # Identity sandwiched between Add and Abs inside a full graph.
    add_node = make_node('Add', inputs=['A', 'B'], outputs=['add1'], name='add_node1')
    pass_node = make_node('Identity', inputs=['add1'], outputs=['identity1'],
                          name='identity_node1')
    abs_node = make_node('Abs', inputs=['identity1'], outputs=['Y'], name='abs_node1')
    graph = make_graph(
        [add_node, pass_node, abs_node], 'test_graph',
        [make_tensor_value_info('A', onnx.TensorProto.FLOAT, shape),
         make_tensor_value_info('B', onnx.TensorProto.FLOAT, shape)],
        [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, shape)])
    model = make_model(graph, producer_name='ngraph ONNX Importer')
    ng_model_function = import_onnx_model(model)
    computation = get_runtime().computation(ng_model_function)
    ng_results = computation(input_data, input_data)
    expected_result = np.abs(input_data + input_data)
    assert np.array_equal(ng_results[0], expected_result)
Example #6
Source File: test_graph_import.py From ngraph-onnx with Apache License 2.0 | 6 votes |
def test_simple_graph():
    """Two chained Add nodes compute Y = (A + B) + C."""
    first_add = make_node('Add', ['A', 'B'], ['X'], name='add_node1')
    second_add = make_node('Add', ['X', 'C'], ['Y'], name='add_node2')
    graph = make_graph(
        [first_add, second_add], 'test_graph',
        [make_tensor_value_info(name, onnx.TensorProto.FLOAT, [1])
         for name in ('A', 'B', 'C')],
        [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, [1])])
    model = make_model(graph, producer_name='ngraph ONNXImporter')
    ng_model_function = import_onnx_model(model)
    computation = get_runtime().computation(ng_model_function)
    assert np.array_equal(computation(1, 2, 3)[0], np.array([6.0], dtype=np.float32))
    assert np.array_equal(computation(4, 5, 6)[0], np.array([15.0], dtype=np.float32))
Example #7
Source File: test_factor_out_mul_sign_magnitude.py From finn with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_factor_out_mul_sign_magnitude():
    """FactorOutMulSignMagnitude must preserve the network's execution result."""

    def vi(name):
        # All tensors in this toy network share the same [1, 2] float shape.
        return oh.make_tensor_value_info(name, TensorProto.FLOAT, [1, 2])

    # Single-Mul network: top_out = top_in * mul_param.
    graph = oh.make_graph(
        name="test",
        inputs=[vi("top_in")],
        outputs=[vi("top_out")],
        value_info=[vi("mul_param")],
        nodes=[oh.make_node("Mul", ["top_in", "mul_param"], ["top_out"])],
    )
    model = ModelWrapper(oh.make_model(graph))
    model = model.transform(InferShapes())
    # A mixed-sign multiplier exercises the sign/magnitude factorization.
    model.set_initializer("mul_param", np.asarray([[-1, 4]], dtype=np.float32))
    new_model = model.transform(FactorOutMulSignMagnitude())
    inp_dict = {"top_in": np.asarray([[-1.0, 1.0]], dtype=np.float32)}
    assert ox.compare_execution(model, new_model, inp_dict)
Example #8
Source File: test_move_add_past_mul.py From finn with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_move_add_past_mul_single():
    """MoveAddPastMul on an Add->Mul chain must preserve execution results."""

    def vi(name):
        # Every tensor in this network is a length-2 float vector.
        return oh.make_tensor_value_info(name, TensorProto.FLOAT, [2])

    graph = oh.make_graph(
        name="test",
        inputs=[vi("top_in")],
        outputs=[vi("top_out")],
        value_info=[vi("add_param"), vi("mul_param")],
        nodes=[
            oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
            oh.make_node("Mul", ["middle", "mul_param"], ["top_out"]),
        ],
    )
    model = ModelWrapper(oh.make_model(graph))
    model = model.transform(InferShapes())
    model.set_initializer("add_param", np.asarray([1, 3], dtype=np.float32))
    model.set_initializer("mul_param", np.asarray([2, 4], dtype=np.float32))
    new_model = model.transform(MoveAddPastMul())
    inp_dict = {"top_in": np.asarray([-1.0, 1.0], dtype=np.float32)}
    assert ox.compare_execution(model, new_model, inp_dict)
Example #9
Source File: onnx_import_test.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def test_equal():
    """Test for the logical Equal operator in onnx operators.

    Bug fix: the docstring previously claimed "logical greater" and the model
    variable was named greater_model — both copy-paste leftovers from a
    sibling test; this block builds and runs an Equal node.
    """
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT,
                                            shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT,
                                            shape=(1, 5))]
    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT,
                                             shape=(1, 3, 4, 5))]
    nodes = [helper.make_node("Equal", ["input1", "input2"], ["output"])]
    graph = helper.make_graph(nodes, "equal_test", inputs, outputs)
    equal_model = helper.make_model(graph)
    bkd_rep = mxnet_backend.prepare(equal_model)
    # Reference result: elementwise equality, cast to float to match output.
    numpy_op = np.equal(input1, input2).astype(np.float32)
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op)
Example #10
Source File: onnx_import_test.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def test_lesser():
    """Test for the logical Less operator in onnx operators.

    Bug fix: the docstring previously claimed "logical greater" and the model
    variable was named greater_model — copy-paste leftovers; this block
    builds and runs a Less node.
    """
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT,
                                            shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT,
                                            shape=(1, 5))]
    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT,
                                             shape=(1, 3, 4, 5))]
    nodes = [helper.make_node("Less", ["input1", "input2"], ["output"])]
    graph = helper.make_graph(nodes, "lesser_test", inputs, outputs)
    lesser_model = helper.make_model(graph)
    bkd_rep = mxnet_backend.prepare(lesser_model)
    # Reference result: elementwise less-than, cast to float to match output.
    numpy_op = np.less(input1, input2).astype(np.float32)
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op)
Example #11
Source File: test_is_linear.py From finn with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_is_linear_linear():
    """A simple Add->Mul chain is linear, so the analysis must report True."""

    def vi(name):
        # Every tensor in this network is a length-2 float vector.
        return oh.make_tensor_value_info(name, TensorProto.FLOAT, [2])

    graph = oh.make_graph(
        name="test",
        inputs=[vi("top_in")],
        outputs=[vi("top_out")],
        value_info=[vi("add_param"), vi("mul_param")],
        nodes=[
            oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
            oh.make_node("Mul", ["middle", "mul_param"], ["top_out"]),
        ],
    )
    model = ModelWrapper(oh.make_model(graph))
    model = model.transform(InferShapes())
    ret = model.analysis(ta.is_linear)
    assert ret["is_linear"] is True
Example #12
Source File: backend.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def make_node_test_model(node, inputs, use_weights=True):
    """Wrap a single ONNX node in a minimal runnable model.

    NOTE(review): the true output dtype/shape are unknown at this point, so
    dummy placeholders (float32, shape [-99]) are used — same HACK as before.
    """
    output_dtype = np.float32  # Dummy value only
    output_shape = [-99]  # Dummy value only
    graph_inputs = [
        onnx_helper.make_tensor_value_info(name, np2onnx_dtype(arr.dtype), arr.shape)
        for name, arr in zip(node.input, inputs)
    ]
    graph_outputs = [
        onnx_helper.make_tensor_value_info(
            name, np2onnx_dtype(output_dtype), output_shape)
        for name in node.output
    ]
    if use_weights:
        # Every input except the first becomes a baked-in initializer.
        initializers = [
            onnx_helper.make_tensor(
                name, np2onnx_dtype(arr.dtype), arr.shape, arr.flatten().tolist())
            for name, arr in zip(node.input[1:], inputs[1:])
        ]
    else:
        initializers = []
    graph = onnx_helper.make_graph(
        [node], "RunNodeGraph_" + node.op_type,
        graph_inputs, graph_outputs, initializer=initializers)
    return onnx_helper.make_model(graph)
Example #13
Source File: test_model.py From onnx-tensorflow with Apache License 2.0 | 6 votes |
def test_relu_node_inplace(self):
    """Relu reading tensor "X" and writing a distinct output tensor "X1"."""
    X = np.random.randn(3, 2).astype(np.float32)
    # Reference: ReLU is clip-at-zero with no upper bound.
    Y_ref = np.clip(X, 0, np.inf)
    relu_node = helper.make_node("Relu", ["X"], ["X1"])
    graph_def = helper.make_graph(
        [relu_node],
        name="test",
        inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [3, 2])],
        outputs=[helper.make_tensor_value_info("X1", TensorProto.FLOAT, [3, 2])])
    tf_rep = prepare(helper.make_model(graph_def))
    output = tf_rep.run({"X": X})
    np.testing.assert_almost_equal(output.X1, Y_ref)
Example #14
Source File: utils.py From ngraph-python with Apache License 2.0 | 6 votes |
def convert_and_calculate(onnx_node, data_inputs, data_outputs):
    # type: (NodeProto, List[np.ndarray], List[np.ndarray]) -> List[np.ndarray]
    """Convert an ONNX node to an ngraph node and evaluate it on input data.

    :param onnx_node: ONNX NodeProto describing a computation node
    :param data_inputs: list of numpy ndarrays with input data
    :param data_outputs: list of numpy ndarrays with expected output data
    :return: list of numpy ndarrays with computed output
    """
    transformer = get_transformer()

    def value_infos(names, arrays):
        # Value infos take their shapes from the concrete sample arrays.
        return [make_tensor_value_info(name, onnx.TensorProto.FLOAT, arr.shape)
                for name, arr in zip(names, arrays)]

    graph = make_graph([onnx_node], 'test_graph',
                       value_infos(onnx_node.input, data_inputs),
                       value_infos(onnx_node.output, data_outputs))
    model = make_model(graph, producer_name='ngraph ONNXImporter')
    ng_results = []
    for ng_model in import_onnx_model(model):
        computation = transformer.computation(ng_model['output'], *ng_model['inputs'])
        ng_results.append(computation(*data_inputs))
    return ng_results
Example #15
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def verify_hardsigmoid(input_dim, alpha, beta):
    """Compare TVM's HardSigmoid output against clip(x * alpha + beta, 0, 1)."""
    dtype = 'float32'
    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    # numpy reference for HardSigmoid.
    b_np = np.clip(a_np1 * alpha + beta, 0, 1)
    node = helper.make_node("HardSigmoid", ["a_np1"], ["out"],
                            alpha=alpha, beta=beta)
    graph = helper.make_graph(
        [node], "HardSigmoid_test",
        inputs=[helper.make_tensor_value_info("a_np1", TensorProto.FLOAT,
                                              list(input_dim))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                               list(b_np.shape))])
    model = helper.make_model(graph, producer_name='HardSigmoid_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape)
        np.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
Example #16
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def _test_softmax(inshape, axis):
    """Run ONNX Softmax through TVM and compare against topi's reference."""
    opname = 'Softmax'
    indata = np.random.uniform(size=inshape).astype(np.float32)
    outshape = inshape
    outdata = topi.testing.softmax_python(indata)
    # Attach the axis attribute only when an integer axis was requested.
    if isinstance(axis, int):
        softmax_node = helper.make_node(opname, ['in'], ['out'], axis=axis)
    elif axis is None:
        softmax_node = helper.make_node(opname, ['in'], ['out'])
    graph = helper.make_graph(
        [softmax_node], opname + '_test',
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                              list(indata.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                               list(outdata.shape))])
    model = helper.make_model(graph, producer_name=opname + '_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outshape, 'float32')
        np.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
Example #17
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def _test_upsample_nearest():
    """2x nearest-neighbour Upsample compared against topi's reference."""
    scale = 2
    in_shape = (1, 1, 3, 3)
    out_shape = (1, 1, 3 * scale, 3 * scale)
    upsample_node = helper.make_node("Upsample", ['in'], ['out'],
                                     mode='nearest', scales=[1.0, 1.0, 2.0, 2.0])
    in_array = np.random.uniform(size=in_shape).astype(np.float32)
    out_array = topi.testing.upsampling_python(in_array, scale, "NCHW")
    graph = helper.make_graph(
        [upsample_node], 'upsample_nearest_test',
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                              list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                               list(out_shape))])
    model = helper.make_model(graph, producer_name='upsample_nearest_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, in_array, target, ctx, out_shape, 'float32')
        np.testing.assert_allclose(out_array, tvm_out)
Example #18
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def test_matmul():
    """MatMul of a (4, 3) by a (3, 4) matrix checked against np.matmul."""
    a_shape = (4, 3)
    b_shape = (3, 4)
    a_array = np.random.uniform(size=a_shape).astype('float32')
    b_array = np.random.uniform(size=b_shape).astype('float32')
    out_np = np.matmul(a_array, b_array)
    mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
    graph = helper.make_graph(
        [mul_node], "matmul_test",
        inputs=[helper.make_tensor_value_info("a", TensorProto.FLOAT,
                                              list(a_shape)),
                helper.make_tensor_value_info("b", TensorProto.FLOAT,
                                              list(b_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                               list(out_np.shape))])
    model = helper.make_model(graph, producer_name='matmul_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_array, b_array], target, ctx, out_np.shape)
        np.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
Example #19
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs):
    """Generic elementwise-op checker: ONNX op output vs numpy reference.

    Bug fix: the input shape was previously hard-coded to (2, 4, 5, 6),
    silently ignoring the inshape argument; the random input now honours it.

    :param inshape: shape of the random test input
    :param outfunc: numpy reference function applied to the input
    :param npargs: keyword arguments forwarded to outfunc
    :param dtype: numpy dtype string for the input data
    :param opname: ONNX operator name to build and run
    :param kwargs: attributes forwarded to the ONNX node
    """
    indata = np.random.uniform(size=inshape).astype(dtype)
    outdata = outfunc(indata, **npargs)
    y = helper.make_node(opname, ['in'], ['out'], **kwargs)
    graph = helper.make_graph(
        [y], opname + '_test',
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                              list(indata.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                               list(outdata.shape))])
    model = helper.make_model(graph, producer_name=opname + '_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, dtype)
        np.testing.assert_allclose(outdata, tvm_out)
Example #20
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def _test_slice_iteration(indata, outdata, starts, ends, axes=None):
    """Build a Slice model and check TVM output against the expected outdata."""
    # The axes attribute is optional; omit it entirely when not supplied.
    if axes:
        slice_node = helper.make_node("Slice", ['in'], ['out'],
                                      axes=axes, starts=starts, ends=ends)
    else:
        slice_node = helper.make_node("Slice", ['in'], ['out'],
                                      starts=starts, ends=ends)
    graph = helper.make_graph(
        [slice_node], 'slice_test',
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                              list(indata.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                               list(outdata.shape))])
    model = helper.make_model(graph, producer_name='slice_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32')
        np.testing.assert_allclose(outdata, tvm_out)
Example #21
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def test_unsqueeze():
    """Unsqueeze with axes (0, 3, 4) must yield shape (1, 3, 3, 1, 1)."""
    in_shape = (3, 3)
    axis = (0, 3, 4)
    out_shape = (1, 3, 3, 1, 1)
    unsqueeze_node = helper.make_node("Unsqueeze", ['in'], ['out'], axes=list(axis))
    graph = helper.make_graph(
        [unsqueeze_node], 'squeeze_test',
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                              list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                               list(out_shape))])
    model = helper.make_model(graph, producer_name='squeeze_test')
    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, out_shape, 'float32')
        # Only the resulting shape is verified here, not the values.
        np.testing.assert_allclose(out_shape, tvm_out.shape)
Example #22
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def test_squeeze():
    """Squeeze removing the size-1 axes 0, 2, 4, 5 must yield shape (3, 3)."""
    in_shape = (1, 3, 1, 3, 1, 1)
    out_shape = (3, 3)
    squeeze_node = helper.make_node("Squeeze", ['in'], ['out'], axes=[0, 2, 4, 5])
    graph = helper.make_graph(
        [squeeze_node], 'squeeze_test',
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                              list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                               list(out_shape))])
    model = helper.make_model(graph, producer_name='squeeze_test')
    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, out_shape, 'float32')
        # Only the resulting shape is verified here, not the values.
        np.testing.assert_allclose(out_shape, tvm_out.shape)
Example #23
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def _test_power_iteration(x_shape, y_shape):
    """Pow with (possibly broadcast) operand shapes checked against np.power."""
    # Allow a bare int as a shorthand for a 1-D exponent shape.
    if isinstance(y_shape, int):
        y_shape = [y_shape]
    x = np.random.uniform(size=x_shape).astype(np.float32)
    y = np.random.uniform(size=y_shape).astype(np.float32)
    np_res = np.power(x, y).astype(np.float32)
    pow_node = helper.make_node("Pow", ['x', 'y'], ['out'])
    graph = helper.make_graph(
        [pow_node], 'power_test',
        inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT,
                                              list(x_shape)),
                helper.make_tensor_value_info("y", TensorProto.FLOAT,
                                              list(y_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                               list(np_res.shape))])
    model = helper.make_model(graph, producer_name='power_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, y], target, ctx, np_res.shape)
        np.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5)
Example #24
Source File: test_model_wrappers.py From ngraph-python with Apache License 2.0 | 6 votes |
def test_attribute_wrapper():
    """Round-trip attribute values of various types through ModelWrapper.

    Bug fix: ``np.long`` and ``np.float`` were deprecated aliases of the
    builtins ``int`` and ``float`` and were removed in NumPy 1.24, so the
    type checks now compare against the builtins directly (identical
    semantics on older NumPy, working code on current NumPy).
    """

    def attribute_value_test(attribute_value):
        # Attach the value as a custom attribute on an Abs node, then read
        # it back through the wrapper API.
        node = make_node('Abs', ['X'], [], name='test_node',
                         test_attribute=attribute_value)
        model = make_model(make_graph([node], 'test_graph', [
            make_tensor_value_info('X', onnx.TensorProto.FLOAT, [1, 2]),
        ], []), producer_name='ngraph')
        wrapped_attribute = ModelWrapper(model).graph.node[0].get_attribute(
            'test_attribute')
        return wrapped_attribute.get_value()

    tensor = make_tensor('test_tensor', onnx.TensorProto.FLOAT, [1], [1])

    assert attribute_value_test(1) == 1
    assert type(attribute_value_test(1)) == int
    assert attribute_value_test(1.0) == 1.0
    assert type(attribute_value_test(1.0)) == float
    assert attribute_value_test('test') == 'test'
    assert attribute_value_test(tensor)._proto == tensor
    assert attribute_value_test([1, 2, 3]) == [1, 2, 3]
    assert attribute_value_test([1.0, 2.0, 3.0]) == [1.0, 2.0, 3.0]
    assert attribute_value_test(['test1', 'test2']) == ['test1', 'test2']
    assert attribute_value_test([tensor, tensor])[1]._proto == tensor
Example #25
Source File: helper_test.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def test_model_docstring(self):  # type: () -> None
    """make_model must carry doc_string through and expose no 'name' field."""
    graph = helper.make_graph([], "my graph", [], [])
    model_def = helper.make_model(graph, doc_string='test')
    # Models may have their own documentation but no name of their own;
    # their identity is the domain-qualified name of the underlying graph.
    self.assertFalse(hasattr(model_def, "name"))
    self.assertEqual(model_def.doc_string, 'test')
Example #26
Source File: version_converter_test.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def _converted(
        self,
        graph,  # type: GraphProto
        initial_version,  # type: OperatorSetIdProto
        target_version  # type: int
):
    # type: (...) -> ModelProto
    """Convert graph from initial_version to target_version and validate it."""
    source_model = helper.make_model(
        graph, producer_name='onnx-test', opset_imports=[initial_version])
    converted_model = onnx.version_converter.convert_version(
        source_model, target_version)
    # The converter's output must still pass the ONNX checker.
    checker.check_model(converted_model)
    return converted_model

# Test 1: Backwards Incompatible Conversion: Reshape: 8 -> 2
Example #27
Source File: conversion_helpers.py From mxnet_to_onnx with Apache License 2.0 | 5 votes |
def from_mxnet(model_file, weight_file, input_shape, input_type, log=False):
    """Convert a saved MXNet model (symbol JSON + weights) to an ONNX model."""
    mx_weights = mx.ndarray.load(weight_file)
    with open(model_file, 'r') as f:
        # Only the node list from the symbol JSON is needed for conversion.
        graph = json.load(f)["nodes"]
    converter = MxNetToONNXConverter()
    onnx_graph = converter.convert_mx2onnx_graph(
        graph, mx_weights, input_shape,
        mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(input_type)],
        log=log)
    return helper.make_model(onnx_graph)
Example #28
Source File: test_fpgadataflow_convinputgenerator.py From finn with BSD 3-Clause "New" or "Revised" License | 5 votes |
def make_single_slidingwindow_modelwrapper(
    k, ifm_ch, ifm_dim, ofm_dim, simd, stride, idt
):
    """Build a one-node ConvolutionInputGenerator model wrapped in ModelWrapper."""
    odt = idt  # the output datatype mirrors the input datatype
    inp = helper.make_tensor_value_info(
        "inp", TensorProto.FLOAT, [1, ifm_dim, ifm_dim, ifm_ch]
    )
    # Each output pixel carries a full k x k x channels input window.
    outp = helper.make_tensor_value_info(
        "outp", TensorProto.FLOAT, [1, ofm_dim, ofm_dim, k * k * ifm_ch]
    )
    window_node = helper.make_node(
        "ConvolutionInputGenerator",
        ["inp"],
        ["outp"],
        domain="finn",
        backend="fpgadataflow",
        ConvKernelDim=k,
        IFMChannels=ifm_ch,
        IFMDim=ifm_dim,
        OFMDim=ofm_dim,
        SIMD=simd,
        Stride=stride,
        inputDataType=idt.name,
        outputDataType=odt.name,
    )
    graph = helper.make_graph(
        nodes=[window_node],
        name="slidingwindow_graph",
        inputs=[inp],
        outputs=[outp],
    )
    model = ModelWrapper(
        helper.make_model(graph, producer_name="slidingwindow-model"))
    model.set_tensor_datatype("inp", idt)
    model.set_tensor_datatype("outp", odt)
    return model
Example #29
Source File: helper_test.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def test_model_metadata_props(self):  # type: () -> None
    """set_model_props replaces metadata; manual duplicates fail the checker."""
    graph = helper.make_graph([], "my graph", [], [])
    model_def = helper.make_model(graph, doc_string='test')
    props = {'Title': 'my graph', 'Keywords': 'test;graph'}
    helper.set_model_props(model_def, props)
    checker.check_model(model_def)
    # Setting identical props again replaces them, so no duplicates appear.
    helper.set_model_props(model_def, props)
    checker.check_model(model_def)  # helper replaces, so no dupe
    # Appending a duplicate key by hand must make the checker reject the model.
    dupe = model_def.metadata_props.add()
    dupe.key = 'Title'
    dupe.value = 'Other'
    self.assertRaises(checker.ValidationError, checker.check_model, model_def)
Example #30
Source File: checker_test.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def test_check_old_model(self):  # type: () -> None
    """A Pad node using the opset-1 'paddings' attribute validates under opset 1."""
    pad_node = helper.make_node(
        "Pad", ["X"], ["Y"], paddings=(0, 0, 0, 0))
    graph = helper.make_graph(
        [pad_node],
        "test",
        [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
        [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
    # Pin the default domain to opset version 1, where 'paddings' was legal.
    onnx_id = helper.make_opsetid("", 1)
    model = helper.make_model(graph, producer_name='test', opset_imports=[onnx_id])
    checker.check_model(model)