Python tensorflow.python.ops.nn_ops.conv2d() Examples
The following are 30 code examples of tensorflow.python.ops.nn_ops.conv2d(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.python.ops.nn_ops.
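Before the project examples, a minimal sketch of the basic call shape they all share may help. This sketch is not taken from any of the projects below; the shapes, names, and the TF1-style session are illustrative assumptions, and in application code the public tf.nn.conv2d wrapper is normally preferred over the internal nn_ops module.

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import nn_ops

# NHWC input: [batch, height, width, in_channels]
x = tf.constant(np.random.rand(1, 8, 8, 3), dtype=tf.float32)
# HWIO filter: [filter_height, filter_width, in_channels, out_channels]
w = tf.constant(np.random.rand(3, 3, 3, 16), dtype=tf.float32)
# One stride per input dimension; "SAME" padding preserves the spatial size.
y = nn_ops.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")

with tf.Session() as sess:  # TF1-style graph execution
    print(sess.run(y).shape)  # expected: (1, 8, 8, 16)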
Example #1
Source File: nn_grad.py From lambda-packs with MIT License

def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  return [None,
          nn_ops.conv2d_backprop_filter(grad,
                                        array_ops.shape(op.inputs[1]),
                                        op.inputs[2],
                                        op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format")),
          nn_ops.conv2d(grad,
                        op.inputs[1],
                        op.get_attr("strides"),
                        op.get_attr("padding"),
                        op.get_attr("use_cudnn_on_gpu"),
                        op.get_attr("data_format"))]
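For context, and not part of the snippet above: in TensorFlow's nn_grad.py this function is registered as the gradient of the Conv2DBackpropInput op via the ops.RegisterGradient decorator, so automatic differentiation picks it up without any direct call. A sketch of that registration, assuming the usual framework import:

from tensorflow.python.framework import ops

@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropInputGrad(op, grad):
    ...  # body as in Example #1 above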
Example #2
Source File: rnn_cell.py From keras-lambda with MIT License

def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("attention"):
    k = vs.get_variable(
        "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("attn_v", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    y = _linear(query, self._attn_vec_size, True)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
Example #3
Source File: nn_grad.py From keras-lambda with MIT License

def _Conv2DBackpropFilterGrad(op, grad):
  return [
      nn_ops.conv2d_backprop_input(
          array_ops.shape(op.inputs[0]), grad, op.inputs[2],
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format")),
      None,
      nn_ops.conv2d(
          op.inputs[0], grad,
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format"))
  ]
Example #4
Source File: nn_grad.py From keras-lambda with MIT License

def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  return [None,
          nn_ops.conv2d_backprop_filter(grad,
                                        array_ops.shape(op.inputs[1]),
                                        op.inputs[2],
                                        op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format")),
          nn_ops.conv2d(grad,
                        op.inputs[1],
                        op.get_attr("strides"),
                        op.get_attr("padding"),
                        op.get_attr("use_cudnn_on_gpu"),
                        op.get_attr("data_format"))]
Example #5
Source File: nn_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License

def _Conv2DBackpropFilterGrad(op, grad):
  return [
      nn_ops.conv2d_backprop_input(
          array_ops.shape(op.inputs[0]), grad, op.inputs[2],
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format")),
      None,
      nn_ops.conv2d(
          op.inputs[0], grad,
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format"))
  ]
Example #6
Source File: nn_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License

def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  return [None,
          nn_ops.conv2d_backprop_filter(grad,
                                        array_ops.shape(op.inputs[1]),
                                        op.inputs[2],
                                        op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format")),
          nn_ops.conv2d(grad,
                        op.inputs[1],
                        op.get_attr("strides"),
                        op.get_attr("padding"),
                        op.get_attr("use_cudnn_on_gpu"),
                        op.get_attr("data_format"))]
Example #7
Source File: rnn_cell.py From deep_image_model with Apache License 2.0

def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("Attention"):
    k = vs.get_variable("AttnW",
                        [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("AttnV", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    y = _linear(query, self._attn_vec_size, True)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
Example #8
Source File: nn_grad.py From deep_image_model with Apache License 2.0

def _Conv2DBackpropFilterGrad(op, grad):
  return [
      nn_ops.conv2d_backprop_input(
          array_ops.shape(op.inputs[0]), grad, op.inputs[2],
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format")),
      None,
      nn_ops.conv2d(
          op.inputs[0], grad,
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format"))
  ]
Example #9
Source File: nn_grad.py From deep_image_model with Apache License 2.0

def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  return [None,
          nn_ops.conv2d_backprop_filter(grad,
                                        array_ops.shape(op.inputs[1]),
                                        op.inputs[2],
                                        op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format")),
          nn_ops.conv2d(grad,
                        op.inputs[1],
                        op.get_attr("strides"),
                        op.get_attr("padding"),
                        op.get_attr("use_cudnn_on_gpu"),
                        op.get_attr("data_format"))]
Example #10
Source File: rnn_cell.py From Multiview2Novelview with MIT License

def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("attention"):
    k = vs.get_variable(
        "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("attn_v", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    if self._linear3 is None:
      self._linear3 = _Linear(query, self._attn_vec_size, True)
    y = self._linear3(query)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
Example #11
Source File: rnn_cell.py From auto-alt-text-lambda-api with MIT License

def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("attention"):
    k = vs.get_variable(
        "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("attn_v", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    y = _linear(query, self._attn_vec_size, True)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
Example #12
Source File: nn_grad.py From lambda-packs with MIT License

def _Conv2DBackpropFilterGrad(op, grad):
  return [
      nn_ops.conv2d_backprop_input(
          array_ops.shape(op.inputs[0]), grad, op.inputs[2],
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format")),
      None,
      nn_ops.conv2d(
          op.inputs[0], grad,
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format"))
  ]
Example #13
Source File: nn_grad.py From auto-alt-text-lambda-api with MIT License

def _Conv2DBackpropFilterGrad(op, grad):
  return [
      nn_ops.conv2d_backprop_input(
          array_ops.shape(op.inputs[0]), grad, op.inputs[2],
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format")),
      None,
      nn_ops.conv2d(
          op.inputs[0], grad,
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format"))
  ]
Example #14
Source File: rnn_cell.py From lambda-packs with MIT License

def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("attention"):
    k = vs.get_variable(
        "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("attn_v", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    y = _linear(query, self._attn_vec_size, True)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
Example #15
Source File: nn_grad.py From auto-alt-text-lambda-api with MIT License

def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  return [None,
          nn_ops.conv2d_backprop_filter(grad,
                                        array_ops.shape(op.inputs[1]),
                                        op.inputs[2],
                                        op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format")),
          nn_ops.conv2d(grad,
                        op.inputs[1],
                        op.get_attr("strides"),
                        op.get_attr("padding"),
                        op.get_attr("use_cudnn_on_gpu"),
                        op.get_attr("data_format"))]
Example #16
Source File: histogram_ops.py From auto-alt-text-lambda-api with MIT License

def _strict_conv1d(x, h):
  """Return x * h for rank 1 tensors x and h."""
  with ops.name_scope('strict_conv1d', values=[x, h]):
    x = array_ops.reshape(x, (1, -1, 1, 1))
    h = array_ops.reshape(h, (-1, 1, 1, 1))
    result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')
    return array_ops.reshape(result, [-1])
Example #17
Source File: histogram_ops.py From keras-lambda with MIT License

def _strict_conv1d(x, h):
  """Return x * h for rank 1 tensors x and h."""
  with ops.name_scope('strict_conv1d', values=[x, h]):
    x = array_ops.reshape(x, (1, -1, 1, 1))
    h = array_ops.reshape(h, (-1, 1, 1, 1))
    result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')
    return array_ops.reshape(result, [-1])
Example #18
Source File: optimize_for_inference_test.py From keras-lambda with MIT License

def testFuseResizeAndConv(self):
  with self.test_session() as sess:
    inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
    input_op = constant_op.constant(
        np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
    resize_op = image_ops.resize_bilinear(
        input_op, [12, 4], align_corners=False)
    weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
    weights_op = constant_op.constant(
        np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
    nn_ops.conv2d(
        resize_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
    original_graph_def = sess.graph_def
    original_result = sess.run(["output:0"])
  optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
      original_graph_def, ["output"])

  with self.test_session() as sess:
    _ = importer.import_graph_def(
        optimized_graph_def, input_map={}, name="optimized")
    optimized_result = sess.run(["optimized/output:0"])

  self.assertAllClose(original_result, optimized_result)

  for node in optimized_graph_def.node:
    self.assertNotEqual("Conv2D", node.op)
    self.assertNotEqual("ResizeBilinear", node.op)
Example #19
Source File: optimize_for_inference_test.py From keras-lambda with MIT License

def testFuseResizePadAndConv(self):
  with self.test_session() as sess:
    inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
    input_op = constant_op.constant(
        np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
    resize_op = image_ops.resize_bilinear(
        input_op, [12, 4], align_corners=False)
    pad_op = array_ops.pad(resize_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                           mode="REFLECT")
    weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
    weights_op = constant_op.constant(
        np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
    nn_ops.conv2d(
        pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
    original_graph_def = sess.graph_def
    original_result = sess.run(["output:0"])
  optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
      original_graph_def, ["output"])

  with self.test_session() as sess:
    _ = importer.import_graph_def(
        optimized_graph_def, input_map={}, name="optimized")
    optimized_result = sess.run(["optimized/output:0"])

  self.assertAllClose(original_result, optimized_result)

  for node in optimized_graph_def.node:
    self.assertNotEqual("Conv2D", node.op)
    self.assertNotEqual("MirrorPad", node.op)
    self.assertNotEqual("ResizeBilinear", node.op)
Example #20
Source File: histogram_ops.py From lambda-packs with MIT License

def _strict_conv1d(x, h):
  """Return x * h for rank 1 tensors x and h."""
  with ops.name_scope('strict_conv1d', values=[x, h]):
    x = array_ops.reshape(x, (1, -1, 1, 1))
    h = array_ops.reshape(h, (-1, 1, 1, 1))
    result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')
    return array_ops.reshape(result, [-1])
Example #21
Source File: rnn_ops.py From video_prediction with MIT License

def _conv2d(self, inputs, output_filters, bias_initializer):
    input_shape = inputs.get_shape().as_list()
    kernel_shape = list(self._kernel_size) + [input_shape[-1], output_filters]
    kernel = vs.get_variable("kernel", kernel_shape, dtype=dtypes.float32,
                             initializer=init_ops.truncated_normal_initializer(stddev=0.02))
    outputs = nn_ops.conv2d(inputs, kernel, [1] * 4, padding='SAME')
    if not self._normalizer_fn:
        bias = vs.get_variable('bias', [output_filters], dtype=dtypes.float32,
                               initializer=bias_initializer)
        outputs = nn_ops.bias_add(outputs, bias)
    return outputs
Example #22
Source File: rnn_ops.py From video_prediction with MIT License

def _conv2d(self, inputs):
    output_filters = 4 * self._filters
    input_shape = inputs.get_shape().as_list()
    kernel_shape = list(self._kernel_size) + [input_shape[-1], output_filters]
    kernel = vs.get_variable("kernel", kernel_shape, dtype=dtypes.float32,
                             initializer=init_ops.truncated_normal_initializer(stddev=0.02))
    outputs = nn_ops.conv2d(inputs, kernel, [1] * 4, padding='SAME')
    if not self._normalizer_fn:
        bias = vs.get_variable('bias', [output_filters], dtype=dtypes.float32,
                               initializer=init_ops.zeros_initializer())
        outputs = nn_ops.bias_add(outputs, bias)
    return outputs
Example #23
Source File: histogram_ops.py From deep_image_model with Apache License 2.0

def _strict_conv1d(x, h):
  """Return x * h for rank 1 tensors x and h."""
  with ops.name_scope('strict_conv1d', values=[x, h]):
    x = array_ops.reshape(x, (1, -1, 1, 1))
    h = array_ops.reshape(h, (-1, 1, 1, 1))
    result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')
    return array_ops.reshape(result, [-1])
Example #24
Source File: optimize_for_inference_test.py From auto-alt-text-lambda-api with MIT License

def testFuseResizePadAndConv(self):
  with self.test_session() as sess:
    inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
    input_op = constant_op.constant(
        np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
    resize_op = image_ops.resize_bilinear(
        input_op, [12, 4], align_corners=False)
    pad_op = array_ops.pad(resize_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                           mode="REFLECT")
    weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
    weights_op = constant_op.constant(
        np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
    nn_ops.conv2d(
        pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
    original_graph_def = sess.graph_def
    original_result = sess.run(["output:0"])
  optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
      original_graph_def, ["output"])

  with self.test_session() as sess:
    _ = importer.import_graph_def(
        optimized_graph_def, input_map={}, name="optimized")
    optimized_result = sess.run(["optimized/output:0"])

  self.assertAllClose(original_result, optimized_result)

  for node in optimized_graph_def.node:
    self.assertNotEqual("Conv2D", node.op)
    self.assertNotEqual("MirrorPad", node.op)
    self.assertNotEqual("ResizeBilinear", node.op)
Example #25
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0

def _test_convolution(tensor_in_sizes, filter_in_sizes,
                      dilations, strides, padding, data_format):
    """ One iteration of convolution with given shapes and attributes """

    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
        total_size_1 *= s
    for s in filter_in_sizes:
        total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')
        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes,
                                         dtype='float32')
        strides = [1] + strides + [1]
        dilations = [1] + dilations + [1]

        nn_ops.conv2d(in_data, in_filter,
                      strides=strides,
                      padding=padding,
                      data_format=data_format)

        compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
                            'Placeholder:0', 'Conv2D:0')
Example #26
Source File: optimize_for_inference_test.py From auto-alt-text-lambda-api with MIT License

def testFuseResizeAndConv(self):
  with self.test_session() as sess:
    inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
    input_op = constant_op.constant(
        np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
    resize_op = image_ops.resize_bilinear(
        input_op, [12, 4], align_corners=False)
    weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
    weights_op = constant_op.constant(
        np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
    nn_ops.conv2d(
        resize_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
    original_graph_def = sess.graph_def
    original_result = sess.run(["output:0"])
  optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
      original_graph_def, ["output"])

  with self.test_session() as sess:
    _ = importer.import_graph_def(
        optimized_graph_def, input_map={}, name="optimized")
    optimized_result = sess.run(["optimized/output:0"])

  self.assertAllClose(original_result, optimized_result)

  for node in optimized_graph_def.node:
    self.assertNotEqual("Conv2D", node.op)
    self.assertNotEqual("ResizeBilinear", node.op)
Example #27
Source File: convrnn.py From audio-super-res with MIT License

def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None):
  """convolution:

  Args:
    args: a 4D Tensor or a list of 4D, batch x n, Tensors.
    filter_size: int tuple of filter height and width.
    num_features: int, number of features.
    bias_start: starting value to initialize the bias; 0 by default.
    scope: VariableScope for the created subgraph; defaults to "Linear".

  Returns:
    A 4D Tensor with shape [batch h w num_features]

  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """

  # Calculate the total size of arguments on dimension 1.
  total_arg_size_depth = 0
  shapes = [a.get_shape().as_list() for a in args]
  for shape in shapes:
    if len(shape) != 4:
      raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes))
    if not shape[3]:
      raise ValueError("Linear expects shape[4] of arguments: %s" % str(shapes))
    else:
      total_arg_size_depth += shape[3]

  dtype = [a.dtype for a in args][0]

  # Now the computation.
  with tf.variable_scope(scope or "Conv"):
    matrix = tf.get_variable(
        "Matrix",
        [filter_size[0], filter_size[1], total_arg_size_depth, num_features],
        dtype=dtype)
    if len(args) == 1:
      res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME')
    else:
      res = tf.nn.conv2d(tf.concat(axis=3, values=args), matrix,
                         strides=[1, 1, 1, 1], padding='SAME')
    if not bias:
      return res
    bias_term = tf.get_variable(
        "Bias", [num_features],
        dtype=dtype,
        initializer=tf.constant_initializer(bias_start, dtype=dtype))
  return res + bias_term
Example #28
Source File: test_forward.py From incubator-tvm with Apache License 2.0

def _test_convolution(opname, tensor_in_sizes, filter_in_sizes,
                      dilations, strides, padding, data_format,
                      deconv_output_shape=[]):
    """ One iteration of convolution with given shapes and attributes """

    total_size_1 = np.prod(tensor_in_sizes)
    total_size_2 = np.prod(filter_in_sizes)
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')
        in_filter = constant_op.constant(
            filter_array, shape=filter_in_sizes, dtype='float32')
        if data_format == 'NHWC':
            strides = [1] + strides + [1]
            dilations = [1] + dilations + [1]
        else:
            strides = [1, 1] + strides
            dilations = [1, 1] + dilations

        if opname == 'conv':
            nn_ops.conv2d(in_data, in_filter,
                          strides=strides,
                          dilations=dilations,
                          padding=padding,
                          data_format=data_format)

            compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
                                'Placeholder:0', 'Conv2D:0')
        elif opname == 'conv_transpose':
            nn_ops.conv2d_transpose(in_data, in_filter,
                                    output_shape=deconv_output_shape,
                                    strides=strides,
                                    padding=padding,
                                    data_format=data_format)

            compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
                                'Placeholder:0', 'conv2d_transpose:0')
        else:
            nn_ops.depthwise_conv2d_native(in_data, in_filter,
                                           strides=strides,
                                           dilations=dilations,
                                           padding=padding,
                                           data_format=data_format)

            compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
                                'Placeholder:0', 'DepthwiseConv2dNative:0')
Example #29
Source File: optimize_for_inference_test.py From auto-alt-text-lambda-api with MIT License

def testFoldBatchNorms(self):
  with self.test_session() as sess:
    inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
    input_op = constant_op.constant(
        np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)
    weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
    weights_op = constant_op.constant(
        np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
    conv_op = nn_ops.conv2d(
        input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op")
    mean_op = constant_op.constant(
        np.array([10, 20]), shape=[2], dtype=dtypes.float32)
    variance_op = constant_op.constant(
        np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
    beta_op = constant_op.constant(
        np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
    gamma_op = constant_op.constant(
        np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
    ops.get_default_graph().graph_def_versions.producer = 8
    gen_nn_ops._batch_norm_with_global_normalization(
        conv_op, mean_op, variance_op, beta_op, gamma_op, 0.00001, False,
        name="output")
    original_graph_def = sess.graph_def
    original_result = sess.run(["output:0"])
  optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
      original_graph_def)

  with self.test_session() as sess:
    _ = importer.import_graph_def(
        optimized_graph_def, input_map={}, name="optimized")
    optimized_result = sess.run(["optimized/output:0"])

  self.assertAllClose(original_result, optimized_result)

  for node in optimized_graph_def.node:
    self.assertNotEqual("BatchNormWithGlobalNormalization", node.op)
Example #30
Source File: optimize_for_inference_test.py From keras-lambda with MIT License

def testFoldBatchNorms(self):
  with self.test_session() as sess:
    inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
    input_op = constant_op.constant(
        np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)
    weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
    weights_op = constant_op.constant(
        np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
    conv_op = nn_ops.conv2d(
        input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op")
    mean_op = constant_op.constant(
        np.array([10, 20]), shape=[2], dtype=dtypes.float32)
    variance_op = constant_op.constant(
        np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
    beta_op = constant_op.constant(
        np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
    gamma_op = constant_op.constant(
        np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
    ops.get_default_graph().graph_def_versions.producer = 8
    gen_nn_ops._batch_norm_with_global_normalization(
        conv_op, mean_op, variance_op, beta_op, gamma_op, 0.00001, False,
        name="output")
    original_graph_def = sess.graph_def
    original_result = sess.run(["output:0"])
  optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
      original_graph_def)

  with self.test_session() as sess:
    _ = importer.import_graph_def(
        optimized_graph_def, input_map={}, name="optimized")
    optimized_result = sess.run(["optimized/output:0"])

  self.assertAllClose(original_result, optimized_result)

  for node in optimized_graph_def.node:
    self.assertNotEqual("BatchNormWithGlobalNormalization", node.op)