Python onnx.GraphProto() Examples
The following are 10 code examples of onnx.GraphProto(), collected from open source projects. Each example lists its source file, the project it comes from, and the project's license. You may also want to check out all available functions and classes of the onnx module.
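Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how a GraphProto is typically built with onnx.helper.make_graph and wrapped into a ModelProto; the tensor and node names are arbitrary placeholders.

import onnx
from onnx import helper, TensorProto

# Declare one input and one output, both 1x3 float tensors (names are arbitrary).
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])

# A single Relu node connecting the input to the output.
relu = helper.make_node('Relu', inputs=['X'], outputs=['Y'])

# make_graph returns an onnx.GraphProto.
graph = helper.make_graph([relu], 'minimal_graph', [X], [Y])
assert isinstance(graph, onnx.GraphProto)

# Wrap the graph in a ModelProto and validate it, as several examples below do.
model = helper.make_model(graph, producer_name='onnx-example')
onnx.checker.check_model(model)

The helpers in the examples below (make_model, check_model, infer_shapes, optimize) all operate on graphs and models constructed this way.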
Example #1
Source File: model_wrappers.py From ngraph-python with Apache License 2.0

def __init__(self, onnx_proto_instance):  # type: (onnx.GraphProto) -> None
    super(GraphWrapper, self).__init__(onnx_proto_instance, self)
    self._ng_node_cache = {}  # type: Dict[str, TensorOp]
    self.node = [NodeWrapper(node, self) for node in self._proto.node]
    self.input = [ValueInfoWrapper(inpt, self) for inpt in self._proto.input]
    self.output = [ValueInfoWrapper(output, self) for output in self._proto.output]
    self.initializer = [TensorWrapper(initializer, self)
                        for initializer in self._proto.initializer]
    self._initialize_ng_tensors()
    self._initialize_ng_nodes()
Example #2
Source File: optimizer_test.py From training_results_v0.6 with Apache License 2.0

def _optimized(self, graph, opts):  # type: (GraphProto, Sequence[Text]) -> ModelProto
    orig_model = helper.make_model(graph, producer_name='onnx-test')
    optimized_model = onnx.optimizer.optimize(orig_model, opts)
    checker.check_model(optimized_model)
    return optimized_model

# input_types and output_types are lists of triples of (name, type, shape)
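For context, onnx.optimizer has since been split out of the main onnx package into the separate onnxoptimizer project, so the helper above only works with older onnx releases. A hedged usage sketch against such an older release might look like the following; the model path and the choice of optimization passes are illustrative assumptions, not part of the example above.

import onnx
import onnx.optimizer  # removed in newer onnx releases; see the onnxoptimizer package

# 'model.onnx' is a hypothetical path; substitute your own model file.
model = onnx.load('model.onnx')

# Run a couple of standard graph-rewriting passes over the model, then re-validate it.
optimized = onnx.optimizer.optimize(model, ['eliminate_identity', 'eliminate_nop_transpose'])
onnx.checker.check_model(optimized)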
Example #3
Source File: optimizer_test.py From training_results_v0.6 with Apache License 2.0

def _visit_all_nodes_recursive(self, graph, fn):
    # type: (GraphProto, Callable[[NodeProto], None]) -> None
    for node in graph.node:
        fn(node)
        for attr in node.attribute:
            if attr.g is not None:
                self._visit_all_nodes_recursive(attr.g, fn)
            if len(attr.graphs):
                for gr in attr.graphs:
                    self._visit_all_nodes_recursive(gr, fn)
Example #4
Source File: helper_test.py From training_results_v0.6 with Apache License 2.0

def test_attr_repeated_graph_proto(self):  # type: () -> None
    graphs = [GraphProto(), GraphProto()]
    graphs[0].name = "a"
    graphs[1].name = "b"
    attr = helper.make_attribute("graphs", graphs)
    self.assertEqual(attr.name, "graphs")
    self.assertEqual(list(attr.graphs), graphs)
    checker.check_attribute(attr)
Example #5
Source File: shape_inference_test.py From training_results_v0.6 with Apache License 2.0

def _make_graph(self,
                seed_values,  # type: Sequence[Union[Text, Tuple[Text, TensorProto.DataType, Any]]]
                nodes,  # type: List[NodeProto]
                value_info,  # type: List[ValueInfoProto]
                initializer=None  # type: Optional[Sequence[TensorProto]]
                ):  # type: (...) -> GraphProto
    if initializer is None:
        initializer = []
    names_in_initializer = set(x.name for x in initializer)
    input_value_infos = []
    # If the starting values are not also initializers,
    # introduce the starting values as the output of reshape,
    # so that the sizes are guaranteed to be unknown
    for seed_value in seed_values:
        if isinstance(seed_value, tuple):
            seed_name = seed_value[0]
            seed_value_info = make_tensor_value_info(*seed_value)
        else:
            seed_name = seed_value
            seed_value_info = make_empty_tensor_value_info(seed_value)
        if seed_name in names_in_initializer:
            input_value_infos.append(seed_value_info)
        else:
            value_info.append(seed_value_info)
            input_value_infos.append(
                make_tensor_value_info('SEED_' + seed_name, TensorProto.UNDEFINED, ()))
            input_value_infos.append(
                make_tensor_value_info('UNKNOWN_SHAPE_' + seed_name, TensorProto.UNDEFINED, ()))
            nodes[:0] = [make_node("Reshape",
                                   ['SEED_' + seed_name, 'UNKNOWN_SHAPE_' + seed_name],
                                   [seed_name])]
    return helper.make_graph(nodes, "test", input_value_infos, [],
                             initializer=initializer, value_info=value_info)
Example #6
Source File: shape_inference_test.py From training_results_v0.6 with Apache License 2.0

def _inferred(self, graph):  # type: (GraphProto) -> ModelProto
    orig_model = helper.make_model(graph, producer_name='onnx-test')
    inferred_model = onnx.shape_inference.infer_shapes(orig_model)
    checker.check_model(inferred_model)
    return inferred_model
Example #7
Source File: _opt_const_folding.py From onnxconverter-common with MIT License

def reserve_node_for_embedded_graph(nodelist):
    # type: (onnx.GraphProto) -> (onnx.GraphProto, frozenset)
    nodelist = _fix_unamed_node(nodelist)
    ginputs = []
    for nd_ in nodelist:
        for _, subgraph_ in OnnxGraphContext.get_attr_graph(nd_).items():
            inner_inputs = frozenset([i_.name for i_ in subgraph_.input])
            for sub_nd_ in subgraph_.node:
                ginputs.extend([i_ for i_ in sub_nd_.input if i_ not in inner_inputs])
    ginputs.extend(OnnxGraphContext.stopping_initializers)
    return nodelist, frozenset(ginputs)
Example #8
Source File: _opt_const_folding.py From onnxconverter-common with MIT License

def const_folding_optimizer(graph, outer_graph=None):
    # type: (onnx.GraphProto, onnx.GraphProto) -> onnx.GraphProto
    nodelist, reserved_names = reserve_node_for_embedded_graph(graph.node)
    opt_graph = OnnxGraphContext(graph, nodelist)
    node_status = {}
    for ts_ in graph.output:
        _dfs_calc(opt_graph, opt_graph.tensor_to_node[ts_.name], reserved_names, node_status)

    graph.initializer.extend(
        [numpy_helper.from_array(ts_, nm_) for nm_, ts_ in opt_graph.variables.items()])
    new_nodes = [nd_ for nd_ in nodelist if nd_.name in node_status]
    new_nodes = [nd_ for nd_ in new_nodes if nd_.output[0] not in opt_graph.variables]

    def node_key(nd_):
        return abs(node_status[nd_.name])

    new_nodes.sort(key=node_key)
    pruned_initilizers = _remove_unused_initializers(
        new_nodes, graph.initializer, reserved_names,
        None if outer_graph is None else outer_graph.initializer)
    del graph.node[:]
    graph.node.extend(new_nodes)
    del graph.initializer[:]
    graph.initializer.extend(pruned_initilizers)

    for nd_ in graph.node:
        for aname_, subgraph_ in OnnxGraphContext.get_attr_graph(nd_).items():
            opt_inner_graph = const_folding_optimizer(subgraph_, graph)
            lst_attrs = list(nd_.attribute)
            del nd_.attribute[:]
            lst_attrs = [helper.make_attribute(aname_, opt_inner_graph)
                         if attr.name == aname_ else attr for attr in lst_attrs]
            nd_.attribute.extend(lst_attrs)

    return graph
Example #9
Source File: _graph.py From onnx-coreml with MIT License

def from_onnx(graph, onnx_ir_version):  # type: (GraphProto) -> Graph
    input_tensors = {
        t.name: numpy_helper.to_array(t) for t in graph.initializer
    }
    nodes_ = []
    nodes_by_input = {}  # type: Dict[Text, List[Node]]
    nodes_by_output = {}
    for node in graph.node:
        node_ = Node.from_onnx(node)
        for input_ in node_.inputs:
            if input_ in input_tensors:
                node_.input_tensors[input_] = input_tensors[input_]
            else:
                if input_ in nodes_by_input:
                    input_nodes = nodes_by_input[input_]
                else:
                    input_nodes = []
                    nodes_by_input[input_] = input_nodes
                input_nodes.append(node_)
        for output_ in node_.outputs:
            nodes_by_output[output_] = node_
        nodes_.append(node_)

    inputs = []
    for i in graph.input:
        if i.name not in input_tensors:
            inputs.append(_input_from_onnx_input(i))

    outputs = []
    for o in graph.output:
        outputs.append(_input_from_onnx_input(o))

    for node_ in nodes_:
        for input_ in node_.inputs:
            if input_ in nodes_by_output:
                node_.parents.append(nodes_by_output[input_])
        for output_ in node_.outputs:
            if output_ in nodes_by_input:
                node_.children.extend(nodes_by_input[output_])

    # Dictionary to hold the "value_info" field from ONNX graph
    shape_dict = {}  # type: Dict[Text, Tuple[int, ...]]

    def extract_value_info(shape_dict,  # type: Dict[Text, Tuple[int, ...]]
                           value_info,  # type: ValueInfoProto[...]
                           ):  # type: (...) -> None
        t = tuple([int(dim.dim_value) for dim in value_info.type.tensor_type.shape.dim])
        if t:
            shape_dict[value_info.name] = t

    for value_info in graph.value_info:
        extract_value_info(shape_dict, value_info)
    for value_info in graph.input:
        extract_value_info(shape_dict, value_info)
    for value_info in graph.output:
        extract_value_info(shape_dict, value_info)

    return Graph(nodes_, inputs, outputs, shape_dict, onnx_ir_version)
Example #10
Source File: _graph.py From onnx2caffe with MIT License

def from_onnx(graph):  # type: (GraphProto) -> Graph
    input_tensors = {
        t.name: numpy_helper.to_array(t) for t in graph.initializer
    }
    nodes_ = []
    nodes_by_input = {}  # type: Dict[Text, List[Node]]
    nodes_by_output = {}
    for node in graph.node:
        node_ = Node.from_onnx(node)
        for input_ in node_.inputs:
            if input_ in input_tensors:
                node_.input_tensors[input_] = input_tensors[input_]
            else:
                if input_ in nodes_by_input:
                    input_nodes = nodes_by_input[input_]
                else:
                    input_nodes = []
                    nodes_by_input[input_] = input_nodes
                input_nodes.append(node_)
        for output_ in node_.outputs:
            nodes_by_output[output_] = node_
        nodes_.append(node_)

    inputs = []
    for i in graph.input:
        if i.name not in input_tensors:
            inputs.append(_input_from_onnx_input(i))

    outputs = []
    for o in graph.output:
        outputs.append(_input_from_onnx_input(o))

    for node_ in nodes_:
        for input_ in node_.inputs:
            if input_ in nodes_by_output:
                node_.parents.append(nodes_by_output[input_])
        for output_ in node_.outputs:
            if output_ in nodes_by_input:
                node_.children.extend(nodes_by_input[output_])

    # Dictionary to hold the "value_info" field from ONNX graph
    shape_dict = {}  # type: Dict[Text, Tuple[int, ...]]

    def extract_value_info(shape_dict,  # type: Dict[Text, Tuple[int, ...]]
                           value_info,  # type: ValueInfoProto[...]
                           ):  # type: (...) -> None
        shape_dict[value_info.name] = tuple(
            [int(dim.dim_value) for dim in value_info.type.tensor_type.shape.dim])

    for value_info in graph.value_info:
        extract_value_info(shape_dict, value_info)
    for value_info in graph.input:
        extract_value_info(shape_dict, value_info)
    for value_info in graph.output:
        extract_value_info(shape_dict, value_info)

    return Graph(nodes_, inputs, outputs, shape_dict)