Python keras.backend.ones_like() Examples
The following are 30 code examples of keras.backend.ones_like(), each taken from an open-source project. The project, source file, and license are noted above each example. You may also want to check out all available functions and classes of the keras.backend module.
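Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what K.ones_like() does: it returns a tensor of ones with the same shape and dtype as its argument.

import numpy as np
from keras import backend as K

x = K.variable(np.array([[1.0, 2.0], [3.0, 4.0]]))
ones = K.ones_like(x)   # same shape and dtype as x, filled with ones
print(K.eval(ones))     # [[1. 1.]
                        #  [1. 1.]]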
Example #1
Source File: layers.py From research with BSD 3-Clause "New" or "Revised" License
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        # Build a (batch_size, output_dim) tensor of ones, then drop units once,
        # so the same recurrent dropout mask is reused at every timestep.
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * self.output_dim, 1)
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))

    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * input_dim, 1)
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
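A note on the pattern above: reshaping x[:, 0, 0] to (-1, 1) is a shape-only trick that yields one entry per batch element, so the resulting dropout mask has shape (batch_size, dim) and is shared across all timesteps. A minimal sketch of the idea, with illustrative shapes that are not from the project:

from keras import backend as K

x = K.placeholder(shape=(None, 10, 8))               # (batch, timesteps, input_dim)
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))   # (batch, 1), all ones
mask = K.dropout(K.concatenate([ones] * 8, 1), 0.5)  # (batch, 8): one mask per sample, reused at every timestep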
Example #2
Source File: bigru_index_selector.py From deep_qa with Apache License 2.0
def call(self, inputs, mask=None):
    """
    Extract the GRU output for the target document index for the forward
    and backwards GRU outputs, and then concatenate them. If the target word
    index is at index l, and there are T total document words, the desired
    output in the forward pass is at GRU_f[l] (ignoring the batched case) and
    the desired output of the backwards pass is at GRU_b[T-l]. We need to get
    these two vectors and concatenate them. To do so, we'll reverse the
    backwards GRU, which allows us to use the same index/mask for both.
    """
    # TODO(nelson): deal with case where cloze token appears multiple times
    # in a question.
    word_indices, gru_f, gru_b = inputs
    index_mask = K.cast(K.equal((K.ones_like(word_indices) * self.target_index),
                                word_indices), "float32")
    gru_mask = K.repeat_elements(K.expand_dims(index_mask, -1),
                                 K.int_shape(gru_f)[-1],
                                 K.ndim(gru_f) - 1)
    masked_gru_f = switch(gru_mask, gru_f, K.zeros_like(gru_f))
    selected_gru_f = K.sum(masked_gru_f, axis=1)
    masked_gru_b = switch(gru_mask, gru_b, K.zeros_like(gru_b))
    selected_gru_b = K.sum(masked_gru_b, axis=1)
    selected_bigru = K.concatenate([selected_gru_f, selected_gru_b], axis=-1)
    return selected_bigru
Example #3
Source File: rtn.py From ikelos with MIT License
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * self.output_dim, 1)
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])

    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * input_dim, 1)
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Example #4
Source File: rnnrbm.py From keras_bn_library with MIT License
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.hidden_recurrent_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))

    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
Example #5
Source File: recurrent.py From keras_bn_library with MIT License
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.input_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
Example #6
Source File: rhn.py From deep-models with Apache License 2.0
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])

    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Example #7
Source File: rtn.py From ikelos with MIT License
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * self.output_dim, 1)
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * input_dim, 1)
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
Example #8
Source File: lstm2ntm.py From NTM-Keras with MIT License
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
Example #9
Source File: QuestionPooling.py From R-NET-in-Keras with MIT License
def call(self, inputs, mask=None):
    assert(isinstance(inputs, list) and len(inputs) == 5)
    uQ, WQ_u, WQ_v, v, VQ_r = inputs
    uQ_mask = mask[0] if mask is not None else None

    ones = K.ones_like(K.sum(uQ, axis=1, keepdims=True))  # (B, 1, 2H)
    s_hat = K.dot(uQ, WQ_u)
    s_hat += K.dot(ones, K.dot(WQ_v, VQ_r))
    s_hat = K.tanh(s_hat)
    s = K.dot(s_hat, v)
    s = K.batch_flatten(s)

    a = softmax(s, mask=uQ_mask, axis=1)
    rQ = K.batch_dot(uQ, a, axes=[1, 1])

    return rQ
Example #10
Source File: ternary_ops.py From nn_playground with MIT License
def _ternarize(W, H=1):
    '''The weights' ternarization function,

    # References:
    - [Recurrent Neural Networks with Limited Numerical Precision](http://arxiv.org/abs/1608.06902)
    - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)
    '''
    W /= H
    ones = K.ones_like(W)
    zeros = K.zeros_like(W)
    Wt = switch(W > 0.5, ones, switch(W <= -0.5, -ones, zeros))
    Wt *= H
    return Wt
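A quick usage sketch for the function above (the input values are made up; `switch` is assumed to be the element-wise switch defined in the same module):

import numpy as np
from keras import backend as K

W = K.variable(np.array([[0.9, -0.7],
                         [0.2, -0.4]]))
Wt = _ternarize(W)    # thresholds at +/-0.5, giving values in {-1, 0, 1}
print(K.eval(Wt))     # [[ 1. -1.]
                      #  [ 0.  0.]]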
Example #11
Source File: rnnlayer.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def get_constants(self, inputs, training=None):
    constants = []
    '''if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])

    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:'''
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Example #12
Source File: rnnlayer.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def get_constants(self, inputs, training=None):
    constants = []
    '''if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])

    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:'''
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Example #13
Source File: rnnlayer.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def get_constants(self, inputs, training=None):
    constants = []
    '''if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])

    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:'''
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Example #14
Source File: pooling.py From onto-lstm with Apache License 2.0
def call(self, x, mask=None):
    mean = super(IntraAttention, self).call(x, mask)
    # x: (batch_size, input_length, input_dim)
    # mean: (batch_size, input_dim)
    ones = K.expand_dims(K.mean(K.ones_like(x), axis=(0, 2)), dim=0)  # (1, input_length)
    # (batch_size, input_length, input_dim)
    tiled_mean = K.permute_dimensions(K.dot(K.expand_dims(mean), ones), (0, 2, 1))
    if mask is not None:
        if K.ndim(mask) > K.ndim(x):
            # Assuming this is because of the bug in Bidirectional. Temporary fix follows.
            # TODO: Fix Bidirectional.
            mask = K.any(mask, axis=(-2, -1))
        if K.ndim(mask) < K.ndim(x):
            mask = K.expand_dims(mask)
        x = switch(mask, x, K.zeros_like(x))
    # (batch_size, input_length, proj_dim)
    projected_combination = K.tanh(K.dot(x, self.vector_projector) +
                                   K.dot(tiled_mean, self.mean_projector))
    scores = K.dot(projected_combination, self.scorer)  # (batch_size, input_length)
    weights = K.softmax(scores)  # (batch_size, input_length)
    attended_x = K.sum(K.expand_dims(weights) * x, axis=1)  # (batch_size, input_dim)
    return attended_x
Example #15
Source File: QnA.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])

    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Example #16
Source File: losses.py From Kaggle-Carvana-Image-Masking-Challenge with MIT License
def weighted_bce_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # if we want to get same size of output, kernel size must be odd number
    if K.int_shape(y_pred)[1] == 128:
        kernel_size = 11
    elif K.int_shape(y_pred)[1] == 256:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 512:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 1024:
        kernel_size = 41
    else:
        raise ValueError('Unexpected image size')
    averaged_mask = K.pool2d(
        y_true, pool_size=(kernel_size, kernel_size), strides=(1, 1),
        padding='same', pool_mode='avg')
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * \
        K.cast(K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    loss = weighted_bce_loss(y_true, y_pred, weight) + \
        (1 - weighted_dice_coeff(y_true, y_pred, weight))
    return loss
Example #17
Source File: metrics.py From deepcpg with MIT License
def contingency_table(y, z):
    """Compute contingency table."""
    y = K.round(y)
    z = K.round(z)

    def count_matches(a, b):
        tmp = K.concatenate([a, b])
        return K.sum(K.cast(K.all(tmp, -1), K.floatx()))

    ones = K.ones_like(y)
    zeros = K.zeros_like(y)
    y_ones = K.equal(y, ones)
    y_zeros = K.equal(y, zeros)
    z_ones = K.equal(z, ones)
    z_zeros = K.equal(z, zeros)

    tp = count_matches(y_ones, z_ones)
    tn = count_matches(y_zeros, z_zeros)
    fp = count_matches(y_zeros, z_ones)
    fn = count_matches(y_ones, z_zeros)

    return (tp, tn, fp, fn)
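As a follow-up sketch (these helper names are illustrative, not part of deepcpg): once you have the contingency table, standard metrics fall out directly. K.epsilon() guards against division by zero:

from keras import backend as K

def accuracy_from_table(y, z):
    tp, tn, fp, fn = contingency_table(y, z)
    return (tp + tn) / (tp + tn + fp + fn + K.epsilon())

def f1_from_table(y, z):
    tp, tn, fp, fn = contingency_table(y, z)
    return 2 * tp / (2 * tp + fp + fn + K.epsilon())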
Example #18
Source File: losses.py From Kaggle-Carvana-Image-Masking-Challenge with MIT License
def weighted_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # if we want to get same size of output, kernel size must be odd number
    if K.int_shape(y_pred)[1] == 128:
        kernel_size = 11
    elif K.int_shape(y_pred)[1] == 256:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 512:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 1024:
        kernel_size = 41
    else:
        raise ValueError('Unexpected image size')
    averaged_mask = K.pool2d(
        y_true, pool_size=(kernel_size, kernel_size), strides=(1, 1),
        padding='same', pool_mode='avg')
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * \
        K.cast(K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    loss = 1 - weighted_dice_coeff(y_true, y_pred, weight)
    return loss
Example #19
Source File: layers.py From research with BSD 3-Clause "New" or "Revised" License
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * self.output_dim, 1)
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))

    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * input_dim, 1)
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
Example #20
Source File: backend.py From deep_qa with Apache License 2.0
def tile_vector(vector, matrix):
    """
    NOTE: If your matrix has known shape (i.e., the relevant dimension from
    `K.int_shape(matrix) is not None`), you should just use `K.repeat_elements(vector)`
    instead of this.  This method works, however, when the number of rows in your matrix is
    unknown at graph compilation time.

    This method takes a (collection of) vector(s) (shape: (batch_size, vector_dim)), and
    tiles that vector a number of times, giving a matrix of shape (batch_size, tile_length,
    vector_dim).  (I say "vector" and "matrix" here because I'm ignoring the batch_size).

    We need the matrix as input so we know what the tile_length is - the matrix is otherwise
    ignored.  This is necessary in a number of places in the code.  For instance, if you want
    to do a dot product of a vector with all of the vectors in a matrix, the most efficient
    way to do that is to tile the vector first, then do an element-wise product with the
    matrix, then sum out the last mode.  So, we capture this functionality here.

    This is not done as a Keras Layer, however; if you want to use this function, you'll
    need to do it _inside_ of a Layer somehow, either in a Lambda or in the call() method of
    a Layer you're writing.
    """
    # Tensorflow can't use unknown sizes at runtime, so we have to make use of the
    # broadcasting ability of TF and Theano instead to create the tiled sentence encoding.

    # Shape: (tile_length, batch_size, vector_dim)
    k_ones = K.permute_dimensions(K.ones_like(matrix), [1, 0, 2])

    # Now we have a (tile_length, batch_size, vector_dim)*(batch_size, vector_dim)
    # elementwise multiplication which is broadcast. We then reshape back.
    tiled_vector = K.permute_dimensions(k_ones * vector, [1, 0, 2])
    return tiled_vector
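A small usage sketch (shapes invented for illustration): the matrix supplies only the tile_length, and its values are ignored.

import numpy as np
from keras import backend as K

vector = K.variable(np.arange(6, dtype='float32').reshape(2, 3))  # (batch=2, vector_dim=3)
matrix = K.variable(np.zeros((2, 5, 3)))                          # (batch=2, tile_length=5, vector_dim=3)
tiled = tile_vector(vector, matrix)                               # each vector repeated 5 times
print(K.eval(tiled).shape)                                        # (2, 5, 3)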
Example #21
Source File: transform_rnn.py From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License
def _trans(theta):
    tx = theta[:, 3:4]
    ty = theta[:, 4:5]
    tz = theta[:, 5:6]
    zero = K.zeros_like(tx)
    one = K.ones_like(tx)
    first = K.reshape(K.concatenate([one, zero, zero, tx], axis=1), (-1, 1, 4))
    second = K.reshape(K.concatenate([zero, one, zero, ty], axis=1), (-1, 1, 4))
    third = K.reshape(K.concatenate([zero, zero, one, tz], axis=1), (-1, 1, 4))
    trans = K.concatenate([first, second, third], axis=1)
    trans = trans.reshape((trans.shape[0], 3, 4))
    return trans
Example #22
Source File: metrics.py From deepcpg with MIT License
def _sample_weights(y, mask=None):
    """Compute sample weights."""
    if mask is None:
        weights = K.ones_like(y)
    else:
        weights = 1 - K.cast(K.equal(y, mask), K.floatx())
    return weights
Example #23
Source File: overlap.py From deep_qa with Apache License 2.0
def call(self, inputs, mask=None):
    # tensor_a, mask_a are of shape (batch size, length_a)
    # tensor_b, mask_b are of shape (batch size, length_b)
    tensor_a, tensor_b = inputs
    if mask is None:
        mask_b = K.ones_like(tensor_b)
    else:
        mask_b = mask[1]
    length_a = K.int_shape(tensor_a)[1]
    length_b = K.int_shape(tensor_b)[1]
    # change the indices that are masked in b to -1, since no indices
    # in the document will ever be -1.
    tensor_b = K.cast(switch(mask_b, tensor_b, -1 * K.ones_like(tensor_b)), "int32")

    # reshape tensor_a to shape (batch_size, length_a, length_b)
    tensor_a_tiled = K.repeat_elements(K.expand_dims(tensor_a, 2), length_b, axis=2)
    # reshape tensor_b to shape (batch_size, length_a, length_b)
    tensor_b_tiled = K.repeat_elements(K.expand_dims(tensor_b, 1), length_a, axis=1)
    overlap_mask = K.cast(K.equal(tensor_a_tiled, tensor_b_tiled), "float32")
    indices_overlap = K.sum(overlap_mask, axis=-1)
    binary_indices_overlap = K.cast(K.not_equal(indices_overlap,
                                                K.zeros_like(indices_overlap)), "int32")
    one_hot_overlap = K.cast(K.one_hot(binary_indices_overlap, 2), "float32")
    return one_hot_overlap
Example #24
Source File: rnnlayer.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def time_distributed_dense(x, w, b=None, dropout=None,
                           input_dim=None, units=None, timesteps=None):
    """Apply `y . w + b` for every temporal slice y of x.

    # Arguments
        x: input tensor.
        w: weight matrix.
        b: optional bias vector.
        dropout: whether to apply dropout (same dropout mask
            for every temporal slice of the input).
        input_dim: integer; optional dimensionality of the input.
        units: integer; optional dimensionality of the output.
        timesteps: integer; optional number of timesteps.

    # Returns
        Output tensor.
    """
    if not input_dim:
        input_dim = K.shape(x)[2]
    if not timesteps:
        timesteps = K.shape(x)[1]
    if not units:
        units = K.shape(w)[1]

    if dropout is not None and 0. < dropout < 1.:
        # apply the same dropout pattern at every timestep
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
        x = K.in_train_phase(x * expanded_dropout_matrix, x)

    # collapse time dimension and batch dimension together
    x = K.reshape(x, (-1, input_dim))
    x = K.dot(x, w)
    if b is not None:
        x += b
    # reshape to 3D tensor
    if K.backend() == 'tensorflow':
        x = K.reshape(x, K.stack([-1, timesteps, units]))
        x.set_shape([None, None, units])
    else:
        x = K.reshape(x, (-1, timesteps, units))
    return x
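A usage sketch (all dimensions invented): project every timestep of a 3D tensor through the same dense weights.

import numpy as np
from keras import backend as K

x = K.variable(np.random.rand(4, 7, 16))   # (batch, timesteps, input_dim)
w = K.variable(np.random.rand(16, 32))     # (input_dim, units)
y = time_distributed_dense(x, w, input_dim=16, units=32, timesteps=7)
print(K.eval(y).shape)                     # (4, 7, 32)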
Example #25
Source File: replace_masked_values.py From deep_qa with Apache License 2.0
def call(self, inputs, mask=None):
    if mask is None:
        # It turns out that Keras doesn't like it if you just return inputs, so we need to
        # return a different tensor object.  Just doing a cast apparently doesn't work,
        # either, so we'll add 0.
        return inputs + 0.0
    return switch(mask, inputs, K.ones_like(inputs) * self.replace_with)
Example #26
Source File: multiply.py From deep_qa with Apache License 2.0
def compute_mask(self, inputs, mask=None):
    # pylint: disable=unused-argument
    tensor_1, tensor_2 = inputs
    tensor_1_mask, tensor_2_mask = mask
    if tensor_1_mask is None:
        tensor_1_mask = K.ones_like(tensor_1)
    if tensor_2_mask is None:
        tensor_2_mask = K.ones_like(tensor_2)
    tensor_1_mask, tensor_2_mask = self.expand_dims_if_necessary(tensor_1_mask, tensor_2_mask)
    return K.cast(tensor_1_mask, 'uint8') * K.cast(tensor_2_mask, 'uint8')
Example #27
Source File: loss.py From pixel-decoder with MIT License
def weighted_bce_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # if we want to get same size of output, kernel size must be odd number
    averaged_mask = K.pool2d(
        y_true, pool_size=(11, 11), strides=(1, 1), padding='same', pool_mode='avg')
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * \
        K.cast(K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    loss = weighted_bce_loss(y_true, y_pred, weight) + \
        weighted_dice_loss(y_true, y_pred, weight)
    return loss
Example #28
Source File: yolov3.py From keras-onnx with MIT License
def call(self, inputs, **kwargs):
    boxes = inputs[0]
    box_scores = inputs[1]
    box_scores_transpose = tf.transpose(box_scores, perm=[1, 0])
    boxes_number = tf.shape(boxes)[0]
    box_range = tf.range(boxes_number)
    mask = box_scores >= self.score_threshold
    max_boxes_tensor = K.constant(self.max_boxes, dtype='int32')
    classes_ = []
    batch_indexs_ = []
    nms_indexes_ = []
    class_box_range_ = []
    for c in range(self.num_classes):
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        class_box_range = tf.boolean_mask(box_range, mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor,
            iou_threshold=self.iou_threshold)
        class_box_scores = K.gather(class_box_scores, nms_index)
        class_box_range = K.gather(class_box_range, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        batch_index = K.zeros_like(class_box_scores, 'int32')
        batch_indexs_.append(batch_index)
        classes_.append(classes)
        nms_indexes_.append(nms_index)
        class_box_range_.append(class_box_range)
    classes_ = K.concatenate(classes_, axis=0)
    batch_indexs_ = K.concatenate(batch_indexs_, axis=0)
    class_box_range_ = K.concatenate(class_box_range_, axis=0)
    boxes_1 = tf.expand_dims(boxes, 0)
    classes_1 = tf.expand_dims(classes_, 1)
    batch_indexs_ = tf.expand_dims(batch_indexs_, 1)
    class_box_range_ = tf.expand_dims(class_box_range_, 1)
    box_scores_transpose_1 = tf.expand_dims(box_scores_transpose, 0)
    nms_final_ = K.concatenate([batch_indexs_, classes_1, class_box_range_], axis=1)
    nms_final_1 = tf.expand_dims(nms_final_, 0)
    return [boxes_1, box_scores_transpose_1, nms_final_1]
Example #29
Source File: pooling.py From enet-keras with MIT License
def call(self, inputs, output_shape=None):
    """
    Seen on https://github.com/tensorflow/tensorflow/issues/2169
    Replace with unpool op when/if issue merged
    Add theano backend
    """
    updates, mask = inputs[0], inputs[1]
    with K.tf.variable_scope(self.name):
        mask = K.cast(mask, 'int32')
        input_shape = K.tf.shape(updates, out_type='int32')
        # calculation new shape
        if output_shape is None:
            output_shape = (input_shape[0],
                            input_shape[1] * self.size[0],
                            input_shape[2] * self.size[1],
                            input_shape[3])
        self.output_shape1 = output_shape

        # calculation indices for batch, height, width and feature maps
        one_like_mask = K.ones_like(mask, dtype='int32')
        batch_shape = K.concatenate([[input_shape[0]], [1], [1], [1]], axis=0)
        batch_range = K.reshape(K.tf.range(output_shape[0], dtype='int32'),
                                shape=batch_shape)
        b = one_like_mask * batch_range
        y = mask // (output_shape[2] * output_shape[3])
        x = (mask // output_shape[3]) % output_shape[2]
        feature_range = K.tf.range(output_shape[3], dtype='int32')
        f = one_like_mask * feature_range

        # transpose indices & reshape update values to one dimension
        updates_size = K.tf.size(updates)
        indices = K.transpose(K.reshape(K.stack([b, y, x, f]),
                                        [4, updates_size]))
        values = K.reshape(updates, [updates_size])
        ret = K.tf.scatter_nd(indices, values, output_shape)
        return ret
Example #30
Source File: backend.py From deep_qa with Apache License 2.0
def tile_scalar(scalar, vector):
    """
    NOTE: If your vector has known shape (i.e., the relevant dimension from
    `K.int_shape(vector) is not None`), you should just use `K.repeat_elements(scalar)`
    instead of this.  This method works, however, when the number of entries in your vector
    is unknown at graph compilation time.

    This method takes a (collection of) scalar(s) (shape: (batch_size, 1)), and tiles that
    scalar a number of times, giving a vector of shape (batch_size, tile_length).  (I say
    "scalar" and "vector" here because I'm ignoring the batch_size).

    We need the vector as input so we know what the tile_length is - the vector is otherwise
    ignored.

    This is not done as a Keras Layer, however; if you want to use this function, you'll
    need to do it _inside_ of a Layer somehow, either in a Lambda or in the call() method of
    a Layer you're writing.

    TODO(matt): we could probably make a more general `tile_tensor` method, which can do
    this for any dimensionality.  There is another place in the code where we do this with a
    matrix and a tensor; all three of these can probably be one function.
    """
    # Tensorflow can't use unknown sizes at runtime, so we have to make use of the
    # broadcasting ability of TF and Theano instead to create the tiled sentence encoding.

    # Shape: (tile_length, batch_size)
    k_ones = K.permute_dimensions(K.ones_like(vector), [1, 0])

    # Now we have a (tile_length, batch_size) * (batch_size, 1) elementwise multiplication
    # which is broadcast. We then reshape back.
    tiled_scalar = K.permute_dimensions(k_ones * K.squeeze(scalar, axis=1), [1, 0])
    return tiled_scalar
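A usage sketch mirroring the tile_vector example above (values invented): the vector supplies only the tile_length.

import numpy as np
from keras import backend as K

scalar = K.variable(np.array([[2.0], [3.0]]))   # (batch=2, 1)
vector = K.variable(np.zeros((2, 4)))           # (batch=2, tile_length=4)
print(K.eval(tile_scalar(scalar, vector)))      # [[2. 2. 2. 2.]
                                                #  [3. 3. 3. 3.]]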