Python tensorflow.concat() Examples
The following are 30 code examples of tensorflow.concat(), drawn from open source projects. The source file, project, and license are noted above each example.
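As a quick primer before the examples (a minimal sketch, assuming TensorFlow 1.x, which these snippets target): tf.concat(values, axis) joins a list of tensors along one existing axis, and all other dimensions must match.

import tensorflow as tf

a = tf.constant([[1, 2], [3, 4]])   # shape (2, 2)
b = tf.constant([[5, 6]])           # shape (1, 2)
c = tf.constant([[7], [8]])         # shape (2, 1)

rows = tf.concat([a, b], axis=0)    # shape (3, 2): joined along rows
cols = tf.concat([a, c], axis=1)    # shape (2, 3): joined along columns

with tf.Session() as sess:
    print(sess.run(rows))  # [[1 2] [3 4] [5 6]]
    print(sess.run(cols))  # [[1 2 7] [3 4 8]]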
Example #1
Source File: model.py From DOTA_models with Apache License 2.0
def compute_column_softmax(self, column_controller_vector, time_step):
    # Compute softmax over all the columns using the column controller vector.
    column_controller_vector = tf.tile(
        tf.expand_dims(column_controller_vector, 1),
        [1, self.num_cols + self.num_word_cols, 1])  # max_cols * bs * d
    column_controller_vector = nn_utils.apply_dropout(
        column_controller_vector, self.utility.FLAGS.dropout, self.mode)
    self.full_column_hidden_vectors = tf.concat(
        axis=1,
        values=[self.column_hidden_vectors, self.word_column_hidden_vectors])
    self.full_column_hidden_vectors += self.summary_text_entry_embeddings
    self.full_column_hidden_vectors = nn_utils.apply_dropout(
        self.full_column_hidden_vectors, self.utility.FLAGS.dropout, self.mode)
    column_logits = tf.reduce_sum(
        column_controller_vector * self.full_column_hidden_vectors, 2) + (
            self.params["word_match_feature_column_name"] *
            self.batch_column_exact_match) + self.full_column_mask
    column_softmax = tf.nn.softmax(column_logits)  # batch_size * max_cols
    return column_softmax
Example #2
Source File: interaction.py From icme2019 with MIT License
def call(self, inputs, **kwargs):
    # Note: the original error message called K.ndim on the list itself;
    # it is applied to the first input here so the check actually runs.
    if K.ndim(inputs[0]) != 3:
        raise ValueError(
            "Unexpected inputs dimensions %d, expect to be 3 dimensions"
            % (K.ndim(inputs[0])))
    embed_list = inputs
    row = []
    col = []
    num_inputs = len(embed_list)
    # Enumerate all (i, j) pairs with i < j.
    for i in range(num_inputs - 1):
        for j in range(i + 1, num_inputs):
            row.append(i)
            col.append(j)
    p = tf.concat([embed_list[idx] for idx in row], axis=1)  # (batch, num_pairs, k)
    q = tf.concat([embed_list[idx] for idx in col], axis=1)
    inner_product = p * q
    if self.reduce_sum:
        inner_product = tf.reduce_sum(
            inner_product, axis=2, keep_dims=True)
    return inner_product
Example #3
Source File: vision_baseline_lstm.py From DOTA_models with Apache License 2.0
def visit_count_fc(visit_count, last_visit, embed_neurons, wt_decay,
                   fc_dropout, is_training=True):
    # Note: is_training was referenced in the body but missing from the
    # original signature; it is added here so the snippet is self-contained.
    with tf.variable_scope('embed_visit_count'):
        visit_count = tf.reshape(visit_count, shape=[-1])
        last_visit = tf.reshape(last_visit, shape=[-1])
        visit_count = tf.clip_by_value(
            visit_count, clip_value_min=-1, clip_value_max=15)
        last_visit = tf.clip_by_value(
            last_visit, clip_value_min=-1, clip_value_max=15)
        visit_count = tf.one_hot(visit_count, depth=16, axis=1,
                                 dtype=tf.float32, on_value=10., off_value=0.)
        last_visit = tf.one_hot(last_visit, depth=16, axis=1,
                                dtype=tf.float32, on_value=10., off_value=0.)
        f = tf.concat([visit_count, last_visit], 1)
        x, _ = tf_utils.fc_network(
            f, neurons=embed_neurons, wt_decay=wt_decay,
            name='visit_count_embed', offset=0, batch_norm_param=None,
            dropout_ratio=fc_dropout, is_training=is_training)
    return x
Example #4
Source File: convolution.py From Traffic_sign_detection_YOLO with MIT License
def _forward(self):
    inp = self.inp.out
    shape = inp.get_shape().as_list()
    _, h, w, c = shape
    s = self.lay.stride
    out = list()
    for i in range(int(h / s)):
        row_i = list()
        for j in range(int(w / s)):
            si, sj = s * i, s * j
            boxij = inp[:, si:si + s, sj:sj + s, :]
            flatij = tf.reshape(boxij, [-1, 1, 1, c * s * s])
            row_i += [flatij]
        out += [tf.concat(row_i, 2)]
    self.out = tf.concat(out, 1)
Example #5
Source File: vision_baseline_lstm.py From DOTA_models with Apache License 2.0
def combine_setup(name, combine_type, embed_img, embed_goal,
                  num_img_neurons=None, num_goal_neurons=None):
    # Note: the parameter was originally misspelled "num_img_neuorons";
    # it is renamed here. Integer division (//) is used so the reshape
    # shape stays an int under Python 3.
    with tf.name_scope(name + '_' + combine_type):
        if combine_type == 'add':
            # Add the goal and image features elementwise.
            out = embed_img + embed_goal
        elif combine_type == 'multiply':
            # Multiply things together.
            re_embed_img = tf.reshape(
                embed_img,
                shape=[-1, num_img_neurons // num_goal_neurons,
                       num_goal_neurons])
            re_embed_goal = tf.reshape(embed_goal,
                                       shape=[-1, num_goal_neurons, 1])
            x = tf.matmul(re_embed_img, re_embed_goal,
                          transpose_a=False, transpose_b=False)
            out = slim.flatten(x)
        elif combine_type == 'none' or combine_type == 'imgonly':
            out = embed_img
        elif combine_type == 'goalonly':
            out = embed_goal
        else:
            logging.fatal('Undefined combine_type: %s', combine_type)
    return out
Example #6
Source File: sequence.py From icme2019 with MIT License
def call(self, x):
    if (self.size is None) or (self.mode == 'sum'):
        self.size = int(x.shape[-1])
    position_j = 1. / K.pow(
        10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
    position_j = K.expand_dims(position_j, 0)
    position_i = tf.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
    position_i = K.expand_dims(position_i, 2)
    position_ij = K.dot(position_i, position_j)
    outputs = K.concatenate(
        [K.cos(position_ij), K.sin(position_ij)], 2)
    if self.mode == 'sum':
        if self.scale:
            outputs = outputs * outputs ** 0.5
        return x + outputs
    elif self.mode == 'concat':
        return K.concatenate([outputs, x], 2)
Example #7
Source File: convolution.py From Traffic_sign_detection_YOLO with MIT License
def forward(self):
    pad = [[self.lay.pad, self.lay.pad]] * 2
    temp = tf.pad(self.inp.out, [[0, 0]] + pad + [[0, 0]])
    k = self.lay.w['kernels']
    ksz = self.lay.ksize
    half = int(ksz / 2)
    out = list()
    for i in range(self.lay.h_out):
        row_i = list()
        for j in range(self.lay.w_out):
            kij = k[i * self.lay.w_out + j]
            i_, j_ = i + 1 - half, j + 1 - half
            tij = temp[:, i_:i_ + ksz, j_:j_ + ksz, :]
            row_i.append(
                tf.nn.conv2d(tij, kij, padding='VALID', strides=[1] * 4))
        out += [tf.concat(row_i, 2)]
    self.out = tf.concat(out, 1)
Example #8
Source File: core.py From icme2019 with MIT License
def call(self, inputs, **kwargs):
    query, keys = inputs
    keys_len = keys.get_shape()[1]
    queries = K.repeat_elements(query, keys_len, 1)
    att_input = tf.concat(
        [queries, keys, queries - keys, queries * keys], axis=-1)
    att_out = MLP(self.hidden_size, self.activation, self.l2_reg,
                  self.keep_prob, self.use_bn, seed=self.seed)(att_input)
    attention_score = tf.nn.bias_add(
        tf.tensordot(att_out, self.kernel, axes=(-1, 0)), self.bias)
    return attention_score
Example #9
Source File: network_units.py From DOTA_models with Apache License 2.0
def embedding_lookup(embedding_matrix, indices, ids, weights, size):
    """Performs a weighted embedding lookup.

    Args:
      embedding_matrix: float Tensor from which to do the lookup.
      indices: int Tensor for the output rows of the looked up vectors.
      ids: int Tensor vectors to look up in the embedding_matrix.
      weights: float Tensor weights to apply to the looked up vectors.
      size: int number of output rows. Needed since some output rows may be
          empty.

    Returns:
      Weighted embedding vectors.
    """
    embeddings = tf.nn.embedding_lookup([embedding_matrix], ids)
    # TODO(googleuser): allow skipping weights.
    broadcast_weights_shape = tf.concat([tf.shape(weights), [1]], 0)
    embeddings *= tf.reshape(weights, broadcast_weights_shape)
    embeddings = tf.unsorted_segment_sum(embeddings, indices, size)
    return embeddings
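To make the contract concrete, here is a tiny worked example of the lookup above (a hedged sketch, assuming TensorFlow 1.x; all values are invented): the first two weighted vectors scatter-sum into output row 0, the third into row 1.

import tensorflow as tf

embedding_matrix = tf.constant([[1., 0.], [0., 1.], [2., 2.]])
ids = tf.constant([0, 2, 1])          # rows of embedding_matrix to fetch
indices = tf.constant([0, 0, 1])      # output row for each fetched vector
weights = tf.constant([0.5, 1.0, 2.0])

out = embedding_lookup(embedding_matrix, indices, ids, weights, size=2)
with tf.Session() as sess:
    print(sess.run(out))  # [[2.5 2.] [0. 2.]]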
Example #10
Source File: network_units.py From DOTA_models with Apache License 2.0
def pass_through_embedding_matrix(act_block, embedding_matrix, step_idx):
    """Passes the activations through the embedding_matrix.

    Takes care to handle out of bounds lookups.

    Args:
      act_block: matrix of activations.
      embedding_matrix: matrix of weights.
      step_idx: vector containing step indices, with -1 indicating out of
          bounds.

    Returns:
      the embedded activations.
    """
    # Indicator vector for out of bounds lookups.
    step_idx_mask = tf.expand_dims(tf.equal(step_idx, -1), -1)
    # Pad the last column of the activation vectors with the indicator.
    act_block = tf.concat([act_block, tf.to_float(step_idx_mask)], 1)
    return tf.matmul(act_block, embedding_matrix)
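A small check of the out-of-bounds handling (a sketch, assuming TensorFlow 1.x; the matrices are invented): the second activation row has step_idx == -1, so its indicator column switches on and the last row of embedding_matrix is mixed into the product.

import tensorflow as tf

act = tf.constant([[1., 2.], [3., 4.]])
emb = tf.constant([[1., 0.], [0., 1.], [5., 5.]])  # last row: OOB weights
step_idx = tf.constant([0, -1])

out = pass_through_embedding_matrix(act, emb, step_idx)
with tf.Session() as sess:
    print(sess.run(out))  # [[1. 2.] [8. 9.]]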
Example #11
Source File: networks.py From disentangling_conditional_gans with MIT License
def minibatch_stddev_layer(x, group_size=4):
    with tf.variable_scope('MinibatchStddev'):
        # Minibatch must be divisible by (or smaller than) group_size.
        group_size = tf.minimum(group_size, tf.shape(x)[0])
        s = x.shape                                            # [NCHW]  Input shape.
        y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]])  # [GMCHW] Split minibatch into M groups of size G.
        y = tf.cast(y, tf.float32)                             # [GMCHW] Cast to FP32.
        y -= tf.reduce_mean(y, axis=0, keep_dims=True)         # [GMCHW] Subtract mean over group.
        y = tf.reduce_mean(tf.square(y), axis=0)               # [MCHW]  Calc variance over group.
        y = tf.sqrt(y + 1e-8)                                  # [MCHW]  Calc stddev over group.
        y = tf.reduce_mean(y, axis=[1, 2, 3], keep_dims=True)  # [M111]  Take average over fmaps and pixels.
        y = tf.cast(y, x.dtype)                                # [M111]  Cast back to original data type.
        y = tf.tile(y, [group_size, 1, s[2], s[3]])            # [N1HW]  Replicate over group and pixels.
        return tf.concat([x, y], axis=1)                       # [NCHW]  Append as new fmap.
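A quick shape check for the layer above (a sketch, assuming TensorFlow 1.x and NCHW input with a batch divisible by group_size; the shapes are invented): exactly one statistics feature map is appended, so C grows by one.

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [8, 3, 16, 16])  # [NCHW]
y = minibatch_stddev_layer(x, group_size=4)
with tf.Session() as sess:
    batch = np.random.randn(8, 3, 16, 16).astype(np.float32)
    print(sess.run(y, {x: batch}).shape)  # (8, 4, 16, 16)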
Example #12
Source File: model.py From DOTA_models with Apache License 2.0
def compute_first_or_last(self, select, first=True):
    # Perform a first-or-last operation on the row select, with
    # probabilistic row selection.
    answer = tf.zeros_like(select)
    running_sum = tf.zeros([self.batch_size, 1], self.data_type)
    for i in range(self.max_elements):
        if first:
            current = tf.slice(select, [0, i], [self.batch_size, 1])
        else:
            current = tf.slice(select, [0, self.max_elements - 1 - i],
                               [self.batch_size, 1])
        curr_prob = current * (1 - running_sum)
        curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type)
        running_sum += curr_prob
        temp_ans = []
        curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0)
        for i_ans in range(self.max_elements):
            if not first and i_ans == self.max_elements - 1 - i:
                temp_ans.append(curr_prob)
            elif first and i_ans == i:
                temp_ans.append(curr_prob)
            else:
                temp_ans.append(tf.zeros_like(curr_prob))
        temp_ans = tf.transpose(tf.concat(axis=0, values=temp_ans))
        answer += temp_ans
    return answer
Example #13
Source File: inception_resnet_v2.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resnet block."""
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3,
                                        scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3,
                                        scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3,
                                        scope='Conv2d_0c_3x3')
        mixed = tf.concat(axis=3,
                          values=[tower_conv, tower_conv1_1, tower_conv2_2])
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
Example #14
Source File: inception_resnet_v2.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 resnet block."""
    with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
                                        scope='Conv2d_0b_1x7')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
                                        scope='Conv2d_0c_7x1')
        mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
Example #15
Source File: inception_resnet_v2.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 resnet block."""
    with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                        scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
                                        scope='Conv2d_0c_3x1')
        mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
Example #16
Source File: utils.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def preprocess_batch(images_batch, preproc_func=None):
    """Creates a preprocessing graph for a batch given a function that
    processes a single image.

    :param images_batch: A tensor for an image batch.
    :param preproc_func: (optional function) A function that takes in a
        tensor and returns a preprocessed input.
    """
    if preproc_func is None:
        return images_batch

    with tf.variable_scope('preprocess'):
        images_list = tf.split(images_batch, int(images_batch.shape[0]))
        result_list = []
        for img in images_list:
            reshaped_img = tf.reshape(img, img.shape[1:])
            processed_img = preproc_func(reshaped_img)
            result_list.append(tf.expand_dims(processed_img, axis=0))
        result_images = tf.concat(result_list, axis=0)
    return result_images
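For instance, applying TensorFlow's built-in per-image standardization to every image in a batch (a usage sketch; the shapes are invented, and the batch size must be static for the tf.split call above):

import tensorflow as tf

images = tf.placeholder(tf.float32, [4, 32, 32, 3])
processed = preprocess_batch(images, tf.image.per_image_standardization)
print(processed.shape)  # (4, 32, 32, 3): same shape, standardized per image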
Example #17
Source File: model.py From DOTA_models with Apache License 2.0
def encode_coordinates_fn(self, net):
    """Adds one-hot encoding of coordinates to different views in the networks.

    For each "pixel" of a feature map it adds one-hot encoded x and y
    coordinates.

    Args:
      net: a tensor of shape=[batch_size, height, width, num_features]

    Returns:
      a tensor with the same height and width, but altered feature_size.
    """
    mparams = self._mparams['encode_coordinates_fn']
    if mparams.enabled:
        batch_size, h, w, _ = net.shape.as_list()
        x, y = tf.meshgrid(tf.range(w), tf.range(h))
        w_loc = slim.one_hot_encoding(x, num_classes=w)
        h_loc = slim.one_hot_encoding(y, num_classes=h)
        loc = tf.concat([h_loc, w_loc], 2)
        loc = tf.tile(tf.expand_dims(loc, 0), [batch_size, 1, 1, 1])
        return tf.concat([net, loc], 3)
    else:
        return net
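The coordinate encoding in isolation (a sketch, assuming TensorFlow 1.x with tf.contrib.slim; h and w are invented): each spatial position gets a one-hot y code (depth h) and a one-hot x code (depth w) concatenated along the channel axis, mirroring the body of the method above.

import tensorflow as tf
slim = tf.contrib.slim

h, w = 2, 3
x, y = tf.meshgrid(tf.range(w), tf.range(h))     # each of shape [h, w]
w_loc = slim.one_hot_encoding(x, num_classes=w)  # [h, w, w]
h_loc = slim.one_hot_encoding(y, num_classes=h)  # [h, w, h]
loc = tf.concat([h_loc, w_loc], 2)               # [h, w, h + w]
print(loc.shape)  # (2, 3, 5)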
Example #18
Source File: model.py From DOTA_models with Apache License 2.0
def pool_views_fn(self, nets):
    """Combines output of multiple convolutional towers into a single tensor.

    It stacks towers one on top of another (in the height dim) in a 4x1 grid.
    The order is an arbitrary design choice and shouldn't matter much.

    Args:
      nets: list of tensors of shape=[batch_size, height, width, num_features].

    Returns:
      A tensor of shape [batch_size, seq_length, features_size].
    """
    with tf.variable_scope('pool_views_fn/STCK'):
        net = tf.concat(nets, 1)
        batch_size = net.get_shape().dims[0].value
        feature_size = net.get_shape().dims[3].value
        return tf.reshape(net, [batch_size, -1, feature_size])
Example #19
Source File: model.py From DOTA_models with Apache License 2.0
def max_pool_views(self, nets_list):
    """Max pool across all nets in spatial dimensions.

    Args:
      nets_list: A list of 4D tensors with identical size.

    Returns:
      A tensor with the same size as any input tensors.
    """
    batch_size, height, width, num_features = [
        d.value for d in nets_list[0].get_shape().dims
    ]
    xy_flat_shape = (batch_size, 1, height * width, num_features)
    nets_for_merge = []
    with tf.variable_scope('max_pool_views', values=nets_list):
        for net in nets_list:
            nets_for_merge.append(tf.reshape(net, xy_flat_shape))
        merged_net = tf.concat(nets_for_merge, 1)
        net = slim.max_pool2d(
            merged_net, kernel_size=[len(nets_list), 1], stride=1)
        net = tf.reshape(net, (batch_size, height, width, num_features))
    return net
Example #20
Source File: tensor.py From spleeter with MIT License
def pad_and_reshape(instr_spec, frame_length, F):
    """Zero-pads the spectrogram along the frequency axis up to
    frame_length // 2 + 1 bins, then merges its first two dimensions.
    (Docstring parameters were blank in the original; the descriptions
    below are inferred from the code.)

    :param instr_spec: Spectrogram tensor of shape [batch, time, F, channels].
    :param frame_length: STFT frame length.
    :param F: Number of frequency bins currently in the spectrogram.
    :returns: Tensor of shape [batch * time, frame_length // 2 + 1, channels].
    """
    spec_shape = tf.shape(instr_spec)
    extension_row = tf.zeros((spec_shape[0], spec_shape[1], 1, spec_shape[-1]))
    n_extra_row = frame_length // 2 + 1 - F
    extension = tf.tile(extension_row, [1, 1, n_extra_row, 1])
    extended_spec = tf.concat([instr_spec, extension], axis=2)
    old_shape = tf.shape(extended_spec)
    new_shape = tf.concat([[old_shape[0] * old_shape[1]], old_shape[2:]],
                          axis=0)
    processed_instr_spec = tf.reshape(extended_spec, new_shape)
    return processed_instr_spec
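The shape arithmetic with invented numbers (a sketch, assuming TensorFlow 1.x): with frame_length=4096 and F=1024, the frequency axis is padded to 4096 // 2 + 1 = 2049 bins and the batch and time axes are merged.

import numpy as np
import tensorflow as tf

spec = tf.placeholder(tf.float32, [2, 8, 1024, 2])  # [batch, time, F, channels]
out = pad_and_reshape(spec, frame_length=4096, F=1024)
with tf.Session() as sess:
    res = sess.run(out, {spec: np.zeros((2, 8, 1024, 2), np.float32)})
    print(res.shape)  # (16, 2049, 2)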
Example #21
Source File: inception_resnet_v2.py From DOTA_models with Apache License 2.0
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resnet block."""
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3,
                                        scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3,
                                        scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3,
                                        scope='Conv2d_0c_3x3')
        mixed = tf.concat(axis=3,
                          values=[tower_conv, tower_conv1_1, tower_conv2_2])
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
Example #22
Source File: inception_resnet_v2.py From DOTA_models with Apache License 2.0
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 resnet block."""
    with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
                                        scope='Conv2d_0b_1x7')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
                                        scope='Conv2d_0c_7x1')
        mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
Example #23
Source File: inception_resnet_v2.py From DOTA_models with Apache License 2.0
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 resnet block."""
    with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                        scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
                                        scope='Conv2d_0c_3x1')
        mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
Example #24
Source File: inception_v4.py From DOTA_models with Apache License 2.0
def block_inception_a(inputs, scope=None, reuse=None):
    """Builds Inception-A block for Inception v4 network."""
    # By default use stride=1 and SAME padding.
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
                branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')
            return tf.concat(axis=3,
                             values=[branch_0, branch_1, branch_2, branch_3])
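Channel bookkeeping for the block above (a sketch, assuming TensorFlow 1.x with tf.contrib.slim; the 1x35x35x384 input shape is invented for illustration): four branches of 96 channels each are merged on axis 3, so the output carries 384 channels at unchanged spatial size.

import tensorflow as tf
slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [1, 35, 35, 384])
net = block_inception_a(inputs)
print(net.shape)  # (1, 35, 35, 384): 96 + 96 + 96 + 96 channels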
Example #25
Source File: inception_v4.py From DOTA_models with Apache License 2.0
def block_reduction_a(inputs, scope=None, reuse=None):
    """Builds Reduction-A block for Inception v4 network."""
    # By default use stride=1 and SAME padding.
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2,
                                       padding='VALID', scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
                branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,
                                       padding='VALID', scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2,
                                           padding='VALID', scope='MaxPool_1a_3x3')
            return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
Example #26
Source File: inception_v4.py From DOTA_models with Apache License 2.0
def block_reduction_b(inputs, scope=None, reuse=None):
    """Builds Reduction-B block for Inception v4 network."""
    # By default use stride=1 and SAME padding.
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
                branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2,
                                       padding='VALID', scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')
                branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2,
                                       padding='VALID', scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2,
                                           padding='VALID', scope='MaxPool_1a_3x3')
            return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
Example #27
Source File: inception_v4.py From DOTA_models with Apache License 2.0
def block_inception_c(inputs, scope=None, reuse=None):
    """Builds Inception-C block for Inception v4 network."""
    # By default use stride=1 and SAME padding.
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
                branch_1 = tf.concat(axis=3, values=[
                    slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
                    slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')])
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
                branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
                branch_2 = tf.concat(axis=3, values=[
                    slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
                    slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')])
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
            return tf.concat(axis=3,
                             values=[branch_0, branch_1, branch_2, branch_3])
Example #28
Source File: gru_cell.py From DOTA_models with Apache License 2.0
def _w_h_initializer(self):
    """Returns an initializer for the "W_h" parameter matrix.

    See equation (23) in the paper. The "W_h" parameter matrix is the
    concatenation of two parameter submatrices. The matrix returned is
    [U_z, U_r].

    Returns:
      A Tensor with shape [num_units, 2 * num_units] as described above.
    """
    def _initializer(shape, dtype=tf.float32, partition_info=None):
        num_units = self._num_units
        assert shape == [num_units, 2 * num_units]
        u_z = self._u_initializer([num_units, num_units], dtype, partition_info)
        u_r = self._u_initializer([num_units, num_units], dtype, partition_info)
        return tf.concat([u_z, u_r], 1)

    return _initializer
Example #29
Source File: real_nvp_utils.py From DOTA_models with Apache License 2.0
def depool_2x2(input_, stride=2):
    """Depooling."""
    shape = input_.get_shape().as_list()
    batch_size = shape[0]
    height = shape[1]
    width = shape[2]
    channels = shape[3]
    res = tf.reshape(input_, [batch_size, height, 1, width, 1, channels])
    res = tf.concat(
        axis=2,
        values=[res, tf.zeros([batch_size, height, stride - 1, width, 1,
                               channels])])
    res = tf.concat(
        axis=4,
        values=[res, tf.zeros([batch_size, height, stride, width, stride - 1,
                               channels])])
    res = tf.reshape(res, [batch_size, stride * height, stride * width,
                           channels])
    return res
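A sanity check for the function above (a sketch, assuming TensorFlow 1.x; the shapes are invented): depooling doubles both spatial dimensions, interleaving zeros between the original values.

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [2, 4, 4, 3])
y = depool_2x2(x)
with tf.Session() as sess:
    out = sess.run(y, {x: np.ones((2, 4, 4, 3), np.float32)})
    print(out.shape)  # (2, 8, 8, 3)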
Example #30
Source File: real_nvp_utils.py From DOTA_models with Apache License 2.0
def batch_random_flip(input_):
    """Simultaneous horizontal random flip."""
    if isinstance(input_, (float, int)):
        return input_
    shape = input_.get_shape().as_list()
    batch_size = shape[0]
    height = shape[1]
    width = shape[2]
    channels = shape[3]
    res = tf.split(axis=0, num_or_size_splits=batch_size, value=input_)
    res = [elem[0, :, :, :] for elem in res]
    res = [tf.image.random_flip_left_right(elem) for elem in res]
    res = [tf.reshape(elem, [1, height, width, channels]) for elem in res]
    res = tf.concat(axis=0, values=res)
    return res