Python keras.backend.ndim() Examples
The following are 21 code examples of keras.backend.ndim().
You can go to the original project or source file by following the links above each example.
You may also want to check out all other available functions and classes of the keras.backend module.
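Before the examples, a quick orientation: K.ndim() returns the number of dimensions (the rank) of a tensor as a plain Python integer known at graph-construction time, whereas K.shape() returns a symbolic tensor whose values are only known at run time. A minimal sketch of the difference, assuming the TensorFlow backend:

from keras import backend as K

# A placeholder with a known rank but a partially unknown shape.
x = K.placeholder(shape=(None, 10, 32))

print(K.ndim(x))   # -> 3, a plain int, available while building the graph
print(K.shape(x))  # -> a symbolic tensor; its values exist only at run time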
Example #1
Source File: timedistributed.py From fancy-cnn with MIT License
def get_output(self, train=False):
    def format_shape(shape):
        if K._BACKEND == 'tensorflow':
            def trf(x):
                try:
                    return int(x)
                except TypeError:
                    return x
            # list() so the result supports slicing under Python 3,
            # where map() returns an iterator
            return list(map(trf, shape))
        return shape

    X = self.get_input(train)
    in_shape = format_shape(K.shape(X))
    batch_flatten_len = K.prod(in_shape[:2])
    cast_in_shape = (batch_flatten_len, ) + tuple(
        in_shape[i] for i in range(2, K.ndim(X)))

    pre_outs = self.layer(K.reshape(X, cast_in_shape))

    out_shape = format_shape(K.shape(pre_outs))
    cast_out_shape = (in_shape[0], in_shape[1]) + tuple(
        out_shape[i] for i in range(1, K.ndim(pre_outs)))

    outputs = K.reshape(pre_outs, cast_out_shape)
    return outputs
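The layer above leans on exactly that static/dynamic distinction: K.shape(X) supplies the dynamic per-axis sizes (the batch size is unknown), while K.ndim(X) supplies the static rank used to build the target shapes. A hedged sketch of the same fold-time-into-batch reshape outside of a layer, assuming the TensorFlow backend (the sizes are illustrative):

from keras import backend as K

X = K.placeholder(shape=(None, 5, 16))  # (batch, time, features)
in_shape = K.shape(X)                   # dynamic shape tensor
# Fold the time axis into the batch axis before applying an inner layer.
flat = K.reshape(X, (in_shape[0] * in_shape[1], 16))
print(K.ndim(flat))  # -> 2; the rank stays static even though the batch size is not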
Example #2
Source File: neural_doodle.py From DeepLearning_Wavelet-LSTM with MIT License
def style_loss(style_image, target_image, style_masks, target_masks):
    '''Calculate style loss between style_image and target_image,
    in all regions.
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 3 == K.ndim(style_masks) == K.ndim(target_masks)
    loss = K.variable(0)
    for i in range(num_labels):
        if K.image_data_format() == 'channels_first':
            style_mask = style_masks[i, :, :]
            target_mask = target_masks[i, :, :]
        else:
            style_mask = style_masks[:, :, i]
            target_mask = target_masks[:, :, i]
        loss += region_style_loss(style_image, target_image,
                                  style_mask, target_mask)
    return loss
Example #3
Source File: utils.py From deep_complex_networks with MIT License
def get_realpart(x):
    image_format = K.image_data_format()
    ndim = K.ndim(x)
    input_shape = K.shape(x)

    if (image_format == 'channels_first' and ndim != 3) or ndim == 2:
        input_dim = input_shape[1] // 2
        return x[:, :input_dim]

    input_dim = input_shape[-1] // 2
    if ndim == 3:
        return x[:, :, :input_dim]
    elif ndim == 4:
        return x[:, :, :, :input_dim]
    elif ndim == 5:
        return x[:, :, :, :, :input_dim]
Example #4
Source File: utils.py From deep_complex_networks with MIT License
def get_imagpart(x):
    image_format = K.image_data_format()
    ndim = K.ndim(x)
    input_shape = K.shape(x)

    if (image_format == 'channels_first' and ndim != 3) or ndim == 2:
        input_dim = input_shape[1] // 2
        return x[:, input_dim:]

    input_dim = input_shape[-1] // 2
    if ndim == 3:
        return x[:, :, input_dim:]
    elif ndim == 4:
        return x[:, :, :, input_dim:]
    elif ndim == 5:
        return x[:, :, :, :, input_dim:]
Example #5
Source File: models.py From voxelmorph with GNU General Public License v3.0
def _softmax(x, axis=-1, alpha=1):
    """
    building on keras implementation, allow alpha parameter

    Softmax activation function.
    # Arguments
        x: Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
        alpha: a value to multiply all x
    # Returns
        Tensor, output of softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    x = alpha * x
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')
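The extra alpha parameter acts as an inverse temperature: values above 1 sharpen the resulting distribution toward one-hot, values below 1 flatten it. A small NumPy-only illustration of that effect (hypothetical inputs, not part of the original project):

import numpy as np

def softmax_np(x, alpha=1.0):
    x = alpha * x
    e = np.exp(x - x.max())
    return e / e.sum()

logits = np.array([1.0, 2.0, 3.0])
print(softmax_np(logits, alpha=1.0))  # moderate spread across the three classes
print(softmax_np(logits, alpha=5.0))  # much sharper, close to one-hot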
Example #6
Source File: backend.py From bert4keras with Apache License 2.0
def sequence_masking(x, mask, mode=0, axis=None):
    """Applies a conditional mask to a sequence.
    mask: a 0-1 matrix of shape (batch_size, seq_len);
    mode: if 0, multiply x by the mask directly;
          if 1, subtract a large positive number at the padding positions.
    axis: axis of the sequence dimension; defaults to 1.
    """
    if mask is None or mode not in [0, 1]:
        return x
    else:
        if axis is None:
            axis = 1
        if axis == -1:
            axis = K.ndim(x) - 1
        assert axis > 0, 'axis must be greater than 0'
        # expand the 2D mask until it broadcasts against x along `axis`
        for _ in range(axis - 1):
            mask = K.expand_dims(mask, 1)
        for _ in range(K.ndim(x) - K.ndim(mask) - axis + 1):
            mask = K.expand_dims(mask, K.ndim(mask))
        if mode == 0:
            return x * mask
        else:
            return x - (1 - mask) * 1e12
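A hedged usage sketch of sequence_masking as defined above, with mode=1 masking ahead of a softmax over the sequence axis (the shapes are illustrative):

import numpy as np
from keras import backend as K

x = K.constant(np.random.randn(2, 4, 8))  # (batch, seq_len, features)
mask = K.constant([[1., 1., 1., 0.],      # (batch, seq_len); the second
                   [1., 1., 0., 0.]])     # sample is shorter than the first

masked = sequence_masking(x, mask, mode=1, axis=1)
# Padded positions now hold roughly -1e12, so a later softmax over
# the sequence axis gives them effectively zero probability.
print(K.eval(masked).shape)  # (2, 4, 8)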
Example #7
Source File: losses.py From style-transfer with MIT License
def gram_matrix(x):
    """
    Computes the outer-product of the input tensor x.

    Input
    -----
    - x: input tensor of shape (C x H x W)

    Returns
    -------
    - x . x^T

    Note that this can be computed efficiently if x is reshaped
    as a tensor of shape (C x H*W).
    """
    # assert K.ndim(x) == 3
    if K.image_dim_ordering() == 'th':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(features, K.transpose(features))
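For intuition about the shapes involved: with a (C, H, W) feature tensor, batch_flatten produces a (C, H*W) matrix, and multiplying it by its own transpose yields the (C, C) Gram matrix of channel-wise correlations. A quick shape check with illustrative sizes:

import numpy as np
from keras import backend as K

x = K.constant(np.random.randn(64, 32, 32))   # (C, H, W) feature maps
features = K.batch_flatten(x)                 # -> (64, 1024)
gram = K.dot(features, K.transpose(features))
print(K.int_shape(gram))                      # (64, 64) channel correlations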
Example #8
Source File: seq2seq_attention_2.py From stock-price-predict with MIT License
def softmax(x, axis=1):
    """Softmax activation function.
    # Arguments
        x: Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
    # Returns
        Tensor, output of softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')
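A hedged usage note on the axis argument: for 3D attention scores, axis=1 normalizes across the time steps rather than across the last axis, which is what an attention decoder needs. A sketch reusing the softmax defined above (illustrative shapes):

import numpy as np
from keras import backend as K

scores = K.constant(np.random.randn(2, 10, 1))  # (batch, timesteps, 1)
weights = softmax(scores, axis=1)               # normalize over timesteps
print(K.eval(K.sum(weights, axis=1)))           # each slice sums to ~1.0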
Example #9
Source File: seq2seq_attention.py From stock-price-predict with MIT License
def softmax(x, axis=1):
    """Softmax activation function.
    # Arguments
        x: Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
    # Returns
        Tensor, output of softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')
Example #10
Source File: crf.py From keras-contrib with MIT License
def call(self, X, mask=None):
    if mask is not None:
        assert K.ndim(mask) == 2, 'Input mask to CRF must have dim 2 if not None'

    if self.test_mode == 'viterbi':
        test_output = self.viterbi_decoding(X, mask)
    else:
        test_output = self.get_marginal_prob(X, mask)

    self.uses_learning_phase = True
    if self.learn_mode == 'join':
        train_output = K.zeros_like(K.dot(X, self.kernel))
        out = K.in_train_phase(train_output, test_output)
    else:
        if self.test_mode == 'viterbi':
            train_output = self.get_marginal_prob(X, mask)
            out = K.in_train_phase(train_output, test_output)
        else:
            out = test_output
    return out
Example #11
Source File: SparseFullyConnectedLayer.py From NeuralResponseRanking with MIT License
def __init__(self, output_dim,
             init='glorot_uniform',
             activation='relu',
             weights=None,
             W_regularizer=None, b_regularizer=None,
             activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             input_dim=None, **kwargs):
    self.W_initializer = initializers.get(init)
    self.b_initializer = initializers.get('zeros')
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.initial_weights = weights
    self.input_spec = InputSpec(ndim=2)

    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(SparseFullyConnectedLayer, self).__init__(**kwargs)
Example #12
Source File: neural_doodle.py From DeepLearning_Wavelet-LSTM with MIT License
def region_style_loss(style_image, target_image, style_mask, target_mask):
    '''Calculate style loss between style_image and target_image,
    for one common region specified by their (boolean) masks
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 2 == K.ndim(style_mask) == K.ndim(target_mask)
    if K.image_data_format() == 'channels_first':
        masked_style = style_image * style_mask
        masked_target = target_image * target_mask
        num_channels = K.shape(style_image)[0]
    else:
        masked_style = K.permute_dimensions(
            style_image, (2, 0, 1)) * style_mask
        masked_target = K.permute_dimensions(
            target_image, (2, 0, 1)) * target_mask
        num_channels = K.shape(style_image)[-1]
    num_channels = K.cast(num_channels, dtype='float32')
    s = gram_matrix(masked_style) / K.mean(style_mask) / num_channels
    c = gram_matrix(masked_target) / K.mean(target_mask) / num_channels
    return K.mean(K.square(s - c))
Example #13
Source File: neural_doodle.py From DeepLearning_Wavelet-LSTM with MIT License
def total_variation_loss(x):
    assert 4 == K.ndim(x)
    if K.image_data_format() == 'channels_first':
        a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                     x[:, :, 1:, :img_ncols - 1])
        b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                     x[:, :, :img_nrows - 1, 1:])
    else:
        a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                     x[:, 1:, :img_ncols - 1, :])
        b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                     x[:, :img_nrows - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))

# Overall loss is the weighted sum of content_loss, style_loss and tv_loss
# Each individual loss uses features from image/mask models.
Example #14
Source File: SparseFullyConnectedLayer.py From NeuralResponseRanking with MIT License
def build(self, input_shape):
    assert len(input_shape) == 2
    input_dim = input_shape[1]
    #self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))
    self.input_spec = InputSpec(ndim=2, axes={1: input_dim})
    self.W = self.add_weight(
        shape=(input_dim, self.output_dim),
        initializer=self.W_initializer,
        name='SparseFullyConnected_W',
        regularizer=self.W_regularizer,
        constraint=self.W_constraint)
    self.b = self.add_weight(
        shape=(self.output_dim,),
        initializer=self.b_initializer,
        name='SparseFullyConnected_b',
        regularizer=self.b_regularizer,
        constraint=self.b_constraint)
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
    #self.built = True
    #super(SparseFullyConnectedLayer, self).build(input_shape)
Example #15
Source File: neural_style_transfer.py From DeepLearning_Wavelet-LSTM with MIT License
def gram_matrix(x):
    assert K.ndim(x) == 3
    if K.image_data_format() == 'channels_first':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram

# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image
Example #16
Source File: norm.py From deep_complex_networks with MIT License
def build(self, input_shape):
    self.input_spec = InputSpec(ndim=len(input_shape),
                                axes={self.axis: input_shape[self.axis]})
    shape = (input_shape[self.axis],)

    self.gamma = self.add_weight(shape,
                                 initializer=self.gamma_init,
                                 regularizer=self.gamma_regularizer,
                                 name='{}_gamma'.format(self.name))
    self.beta = self.add_weight(shape,
                                initializer=self.beta_init,
                                regularizer=self.beta_regularizer,
                                name='{}_beta'.format(self.name))
    self.built = True
Example #17
Source File: neural_style_transfer.py From DeepLearning_Wavelet-LSTM with MIT License
def style_loss(style, combination):
    assert K.ndim(style) == 3
    assert K.ndim(combination) == 3
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_nrows * img_ncols
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))

# an auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image
Example #18
Source File: neural_style_transfer.py From DeepLearning_Wavelet-LSTM with MIT License
def total_variation_loss(x):
    assert K.ndim(x) == 4
    if K.image_data_format() == 'channels_first':
        a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                     x[:, :, 1:, :img_ncols - 1])
        b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                     x[:, :, :img_nrows - 1, 1:])
    else:
        a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                     x[:, 1:, :img_ncols - 1, :])
        b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                     x[:, :img_nrows - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))

# combine these loss functions into a single scalar
Example #19
Source File: neural_doodle.py From DeepLearning_Wavelet-LSTM with MIT License
def kmeans(xs, k):
    assert xs.ndim == 2
    try:
        from sklearn.cluster import k_means
        _, labels, _ = k_means(xs.astype('float64'), k)
    except ImportError:
        from scipy.cluster.vq import kmeans2
        _, labels = kmeans2(xs, k, missing='raise')
    return labels
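A minimal usage sketch of the helper above, assuming either scikit-learn or SciPy is installed (the data here is hypothetical):

import numpy as np

xs = np.random.rand(100, 3)  # e.g. 100 RGB pixel values
labels = kmeans(xs, 4)       # cluster the pixels into 4 color groups
print(labels.shape)          # (100,)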
Example #20
Source File: utils.py From deep_complex_networks with MIT License
def getpart_output_shape(input_shape):
    returned_shape = list(input_shape[:])
    image_format = K.image_data_format()
    ndim = len(returned_shape)

    if (image_format == 'channels_first' and ndim != 3) or ndim == 2:
        axis = 1
    else:
        axis = -1

    returned_shape[axis] = returned_shape[axis] // 2
    return tuple(returned_shape)
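This shape helper mirrors get_realpart/get_imagpart above: a complex tensor is stored with its real and imaginary halves concatenated along the channel axis, so taking either part halves that axis. A quick check, assuming the default channels_last data format (illustrative shapes):

print(getpart_output_shape((None, 32, 32, 64)))  # -> (None, 32, 32, 32)
print(getpart_output_shape((None, 128)))         # -> (None, 64)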
Example #21
Source File: neural_doodle.py From DeepLearning_Wavelet-LSTM with MIT License
def gram_matrix(x):
    assert K.ndim(x) == 3
    features = K.batch_flatten(x)
    gram = K.dot(features, K.transpose(features))
    return gram