Python onnx.mapping Examples
The following are 11 code examples of the onnx.mapping module.
You can go to the original project or source file by following the link above each example.
You may also want to check out all of the available functions and classes of the onnx module.
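onnx.mapping primarily exposes two lookup tables used throughout the examples below: NP_TYPE_TO_TENSOR_TYPE, which maps a NumPy dtype to the matching TensorProto element type, and TENSOR_TYPE_TO_NP_TYPE, which maps in the opposite direction. The following is a minimal sketch of both lookups, not taken from any of the projects listed here; note that onnx.mapping has been deprecated in recent onnx releases in favor of helpers in onnx.helper, so availability depends on the installed version.

import numpy as np
import onnx
from onnx import mapping

# NumPy dtype -> TensorProto element type enum
elem_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')]
assert elem_type == onnx.TensorProto.FLOAT

# TensorProto element type enum -> NumPy dtype
np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[onnx.TensorProto.FLOAT]
assert np_dtype == np.dtype('float32')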
Example #1
Source File: test_ops_unary.py From ngraph-onnx with Apache License 2.0 | 6 votes |
def test_constant(value_type):
    values = np.random.randn(5, 5).astype(value_type)
    node = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=['values'],
        value=onnx.helper.make_tensor(
            name='const_tensor',
            data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(value_type)],
            dims=values.shape,
            vals=values.flatten()))
    ng_results = run_node(node, [])
    assert np.allclose(ng_results, [values])

# See https://github.com/onnx/onnx/issues/1190
Example #2
Source File: onnx_transforms.py From python-dlpy with Apache License 2.0 | 6 votes |
def run_transform(self, graph, node):
    mapping = self.get_mapping(node)
    bn, mul, add = mapping['bn'], mapping['mul'], mapping['add']
    t = graph.tensor_dict
    scale = t[bn.input[1]]
    bias = t[bn.input[2]]
    _mul_tensor = t.get(mul.input[0], t[mul.input[1]])
    mul_tensor = np.squeeze(_mul_tensor)
    _add_tensor = t.get(add.input[0], t[add.input[1]])
    add_tensor = np.squeeze(_add_tensor)
    # multiply scale and bias
    t[bn.input[1]] = np.multiply(scale, mul_tensor)
    _bias = np.multiply(bias, mul_tensor)
    t[bn.input[2]] = np.add(_bias, add_tensor)
    # connect output of bn to output of add
    bn.output[0] = add.output[0]
    # remove mul and add nodes
    graph.remove_node(mul.name)
    graph.remove_node(add.name)
    return graph
Example #3
Source File: test_ops_unary.py From ngraph-onnx with Apache License 2.0 | 5 votes |
def test_cast_to_bool(val_type, input_data):
    expected = np.array(input_data, dtype=val_type)

    model = get_node_model('Cast', input_data, opset=6,
                           to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type])
    result = run_model(model, [input_data])
    assert np.allclose(result, expected)
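The get_node_model and run_model helpers above (also used in Examples #4 to #6) are defined elsewhere in the ngraph-onnx test suite and are not reproduced on this page. As a rough, self-contained sketch of the underlying idea, the 'to' attribute of a Cast node can be built directly with onnx.helper; the tensor names 'x' and 'y' are chosen purely for illustration:

import numpy as np
from onnx import helper, mapping

val_type = np.dtype('bool')
cast_node = helper.make_node(
    'Cast',
    inputs=['x'],
    outputs=['y'],
    # NP_TYPE_TO_TENSOR_TYPE translates the NumPy dtype into the
    # TensorProto enum value expected by Cast's 'to' attribute
    to=mapping.NP_TYPE_TO_TENSOR_TYPE[val_type],
)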
Example #4
Source File: test_ops_unary.py From ngraph-onnx with Apache License 2.0 | 5 votes |
def test_cast_to_float(val_type, range_start, range_end, in_dtype):
    np.random.seed(133391)
    input_data = np.random.randint(range_start, range_end, size=(2, 2), dtype=in_dtype)
    expected = np.array(input_data, dtype=val_type)

    model = get_node_model('Cast', input_data, opset=6,
                           to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type])
    result = run_model(model, [input_data])
    assert np.allclose(result, expected)
Example #5
Source File: test_ops_unary.py From ngraph-onnx with Apache License 2.0 | 5 votes |
def test_cast_to_int(val_type):
    np.random.seed(133391)
    input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16)
    expected = np.array(input_data, dtype=val_type)

    model = get_node_model('Cast', input_data, opset=6,
                           to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type])
    result = run_model(model, [input_data])
    assert np.allclose(result, expected)
Example #6
Source File: test_ops_unary.py From ngraph-onnx with Apache License 2.0 | 5 votes |
def test_cast_to_uint(val_type):
    np.random.seed(133391)
    input_data = np.ceil(np.random.rand(2, 3, 4) * 16)
    expected = np.array(input_data, dtype=val_type)

    model = get_node_model('Cast', input_data, opset=6,
                           to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type])
    result = run_model(model, [input_data])
    assert np.allclose(result, expected)
Example #7
Source File: model_wrappers.py From ngraph-python with Apache License 2.0 | 5 votes |
def get_dtype(self):  # type: () -> numpy.dtype
    """Return the Numpy data type for this value."""
    return onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[self.type.tensor_type.elem_type]
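The method above belongs to a value-info wrapper class inside ngraph-python, so it cannot be run on its own. A self-contained sketch of the same lookup against a plain ONNX model follows; the file name 'model.onnx' is a hypothetical placeholder:

import onnx
from onnx import mapping

model = onnx.load('model.onnx')      # hypothetical model file
value_info = model.graph.input[0]    # a ValueInfoProto
np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[value_info.type.tensor_type.elem_type]
print(np_dtype)                      # e.g. dtype('float32')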
Example #8
Source File: onnx_transforms.py From python-dlpy with Apache License 2.0 | 5 votes |
def get_mapping(self, node, pattern=None):
    '''
    Given that `node` is the root of a matched subgraph, returns a dict
    mapping names of the OpTypePatterns to their matched OnnxNodes

    Parameters
    ----------
    node : :class:`OnnxNode` object
        The root node of a matching subgraph.
    pattern : :class:`OpTypePattern` object, optional
        The matching pattern.
        If None, defaults to self.pattern.

    Returns
    -------
    dict
        key, value of OpTypePattern name and OnnxNode

    '''
    if pattern is None:
        if self.pattern is None:
            raise ValueError('No pattern to match.')
        pattern = self.pattern
    mapping_dict = {}

    def _mapping(node, pattern, mapping_dict):
        if pattern.name is None:
            raise ValueError('Cannot generate mapping dict,'
                             ' OpTypePattern name is None.')
        mapping_dict[pattern.name] = node
        for child, child_pattern in zip(node.children, pattern.outputs):
            _mapping(child, child_pattern, mapping_dict)
        return mapping_dict

    return _mapping(node, pattern, mapping_dict)
Example #9
Source File: onnx_transforms.py From python-dlpy with Apache License 2.0 | 5 votes |
def is_eligible(self, graph, node):
    mapping = self.get_mapping(node)
    bn, mul, add = mapping['bn'], mapping['mul'], mapping['add']
    # only spatial batchnorm is supported
    if bn.attrs.get('spatial') is not None and bn.attrs['spatial'] != 1:
        return False
    # mul and add must be initialized by some tensor
    if (mul.input[0] not in graph.tensor_dict
            and mul.input[1] not in graph.tensor_dict):
        return False
    if (add.input[0] not in graph.tensor_dict
            and add.input[1] not in graph.tensor_dict):
        return False
    t = graph.tensor_dict
    scale = t[bn.input[1]]
    bias = t[bn.input[2]]
    _mul_tensor = t.get(mul.input[0], t[mul.input[1]])
    mul_tensor = np.squeeze(_mul_tensor)
    _add_tensor = t.get(add.input[0], t[add.input[1]])
    add_tensor = np.squeeze(_add_tensor)
    # check mul is broadcastable
    if mul_tensor.shape != scale.shape or mul_tensor.shape != bias.shape:
        if mul_tensor.shape != (1,) and mul_tensor.shape != ():
            return False
    # check add is broadcastable
    if add_tensor.shape != bias.shape:
        if add_tensor.shape != (1,) and add_tensor.shape != ():
            return False
    return True
Example #10
Source File: onnx.py From incubator-tvm with Apache License 2.0 | 5 votes |
def _impl_v5(cls, inputs, attr, params):
    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
        attr['to'] = str(TENSOR_TYPE_TO_NP_TYPE[attr['to']])
    except ImportError as e:
        raise ImportError(
            "Unable to import onnx.mapping which is required {}".format(e))
    return AttrCvt(op_name='cast', transforms={'to': 'dtype'})(inputs, attr)
Example #11
Source File: onnx.py From incubator-tvm with Apache License 2.0 | 5 votes |
def _parse_dtype(self, value_proto, dtype):
    """Parse dtype."""
    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
        return TENSOR_TYPE_TO_NP_TYPE[value_proto.type.tensor_type.elem_type].name
    except AttributeError:
        return dtype