Python keras.backend.permute_dimensions() Examples
The following are 22 code examples of keras.backend.permute_dimensions(), drawn from open-source projects; the originating project, source file, and license are noted above each example. You may also want to check out all available functions and classes of the keras.backend module.
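Before the examples, a quick note on semantics: K.permute_dimensions(x, pattern) reorders the axes of a tensor according to pattern, much like numpy.transpose. A minimal sketch, assuming the legacy standalone keras package with a TensorFlow backend:

import numpy as np
from keras import backend as K

x = K.variable(np.zeros((2, 3, 4)))     # shape (2, 3, 4)
y = K.permute_dimensions(x, (0, 2, 1))  # swap the last two axes
print(K.int_shape(y))                   # (2, 4, 3)

Most of the examples below use exactly this move: putting the channel axis first, or swapping capsule axes so a batched dot product contracts the right dimensions.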
Example #1
Source File: arch.py From world_models with MIT License

def tf_normal(y_true, mu, sigma, pi):

    rollout_length = K.shape(y_true)[1]

    y_true = K.tile(y_true, (1, 1, GAUSSIAN_MIXTURES))
    y_true = K.reshape(y_true, [-1, rollout_length, GAUSSIAN_MIXTURES, Z_DIM])

    oneDivSqrtTwoPI = 1 / math.sqrt(2 * math.pi)

    result = y_true - mu
    # result = K.permute_dimensions(result, [2, 1, 0])
    result = result * (1 / (sigma + 1e-8))
    result = -K.square(result) / 2
    result = K.exp(result) * (1 / (sigma + 1e-8)) * oneDivSqrtTwoPI
    result = result * pi

    result = K.sum(result, axis=2)  # sum over gaussians
    # result = K.prod(result, axis=2)  # multiply over latent dims
    return result
Example #2
Source File: models.py From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License

def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs,
                           (batch_size, input_num_capsule,
                            self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = K.batch_dot(outputs, u_hat_vecs, [2, 3])

    return outputs
Example #3
Source File: customlayers.py From deep-mil-for-whole-mammogram-classification with MIT License

def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """This is the function used for cross-channel normalization
    in the original AlexNet.
    """
    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        extra_channels = K.spatial_2d_padding(
            K.permute_dimensions(square, (0, 2, 3, 1)), (0, half))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
Example #4
Source File: neural_doodle.py From DeepLearning_Wavelet-LSTM with MIT License

def region_style_loss(style_image, target_image, style_mask, target_mask):
    '''Calculate style loss between style_image and target_image,
    for one common region specified by their (boolean) masks.
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 2 == K.ndim(style_mask) == K.ndim(target_mask)
    if K.image_data_format() == 'channels_first':
        masked_style = style_image * style_mask
        masked_target = target_image * target_mask
        num_channels = K.shape(style_image)[0]
    else:
        masked_style = K.permute_dimensions(
            style_image, (2, 0, 1)) * style_mask
        masked_target = K.permute_dimensions(
            target_image, (2, 0, 1)) * target_mask
        num_channels = K.shape(style_image)[-1]
    num_channels = K.cast(num_channels, dtype='float32')
    s = gram_matrix(masked_style) / K.mean(style_mask) / num_channels
    c = gram_matrix(masked_target) / K.mean(target_mask) / num_channels
    return K.mean(K.square(s - c))
Example #5
Source File: customlayers.py From convnets-keras with MIT License

def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """This is the function used for cross-channel normalization
    in the original AlexNet.
    """
    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        extra_channels = K.spatial_2d_padding(
            K.permute_dimensions(square, (0, 2, 3, 1)), (0, half))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
Example #6
Source File: BBalpha_dropout.py From Dropout_BBalpha with MIT License

def GenerateMCSamples(inp, layers, K_mc=20):
    if K_mc == 1:
        return apply_layers(inp, layers)
    output_list = []
    for _ in range(K_mc):  # `xrange` in the original (Python 2)
        output_list += [apply_layers(inp, layers)]  # THIS IS BAD!!! we create new dense layers at every call!!!!

    def pack_out(output_list):
        # output = K.pack(output_list)  # K_mc x nb_batch x nb_classes
        output = K.stack(output_list)  # K_mc x nb_batch x nb_classes
        return K.permute_dimensions(output, (1, 0, 2))  # nb_batch x K_mc x nb_classes

    def pack_shape(s):
        s = s[0]
        assert len(s) == 2
        return (s[0], K_mc, s[1])

    out = Lambda(pack_out, output_shape=pack_shape)(output_list)
    return out

# evaluation for classification tasks
Example #7
Source File: capsule.py From Keras-TextClassification with MIT License

def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs,
                           (batch_size, input_num_capsule,
                            self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
    # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    outputs = None
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = K.batch_dot(outputs, u_hat_vecs, [2, 3])

    return outputs
Example #8
Source File: layer_normalization.py From keras-utility-layer-collection with MIT License

def call(self, x):
    mean = K.mean(x, axis=-1)
    std = K.std(x, axis=-1)

    if len(x.shape) == 3:
        mean = K.permute_dimensions(
            K.repeat(mean, x.shape.as_list()[-1]),
            [0, 2, 1])
        std = K.permute_dimensions(
            K.repeat(std, x.shape.as_list()[-1]),
            [0, 2, 1])
    elif len(x.shape) == 2:
        mean = K.reshape(
            K.repeat_elements(mean, x.shape.as_list()[-1], 0),
            (-1, x.shape.as_list()[-1]))
        # The original repeated `mean` here as well, an apparent
        # copy-paste bug; it should repeat `std`.
        std = K.reshape(
            K.repeat_elements(std, x.shape.as_list()[-1], 0),
            (-1, x.shape.as_list()[-1]))

    return self._g * (x - mean) / (std + self._epsilon) + self._b
Example #9
Source File: fft.py From deep_complex_networks with MIT License

def ifft2(x):
    ff = x
    ff = KB.permute_dimensions(ff, (0, 2, 1))
    ff = KB.reshape(ff, (x.shape[0] * x.shape[2], x.shape[1]))
    tf = ifft(ff)
    tf = KB.reshape(tf, (x.shape[0], x.shape[2], x.shape[1]))
    tf = KB.permute_dimensions(tf, (0, 2, 1))
    tf = KB.reshape(tf, (x.shape[0] * x.shape[1], x.shape[2]))
    tt = ifft(tf)
    tt = KB.reshape(tt, (x.shape[0], x.shape[1], x.shape[2]))
    return tt

#
# FFT Layers:
#
# FFT:   Batched 1-D FFT  (Input: (Batch, FeatureMaps, TimeSamples))
# IFFT:  Batched 1-D IFFT (Input: (Batch, FeatureMaps, FreqSamples))
# FFT2:  Batched 2-D FFT  (Input: (Batch, FeatureMaps, TimeSamplesH, TimeSamplesW))
# IFFT2: Batched 2-D IFFT (Input: (Batch, FeatureMaps, FreqSamplesH, FreqSamplesW))
#
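The ifft2 helper above is an instance of a general idiom: to run a batched 1-D transform along one axis of a multi-axis tensor, permute that axis into position, collapse all leading axes into a single batch axis, apply the transform, then undo the reshape and permute. A minimal numpy illustration of the same idiom (illustrative only; the real code applies a custom ifft op to Keras tensors, not numpy arrays):

import numpy as np

x = np.random.rand(8, 4, 16)      # (Batch, FeatureMaps, Samples)
flat = x.reshape(8 * 4, 16)       # collapse leading axes into one batch axis
out = np.fft.ifft(flat, axis=-1)  # batched 1-D transform over the last axis
out = out.reshape(8, 4, 16)       # restore (Batch, FeatureMaps, Samples)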
Example #10
Source File: multi_dim_attention.py From nlp_toolkit with MIT License

def call(self, x, mask=None):
    uit = K.tanh(K.dot(x, self.Ws1))
    ait = K.dot(uit, self.Ws2)
    ait = K.permute_dimensions(ait, (0, 2, 1))
    A = K.softmax(ait, axis=1)
    M = K.batch_dot(A, x)
    if self.punish:
        A_T = K.permute_dimensions(A, (0, 2, 1))
        tile_eye = K.tile(K.eye(self.weight_ws2), [self.batch_size, 1])
        tile_eye = K.reshape(
            tile_eye, shape=[-1, self.weight_ws2, self.weight_ws2])
        AA_T = K.batch_dot(A, A_T) - tile_eye
        P = K.l2_normalize(AA_T, axis=(1, 2))
        return M, P
    else:
        return M
Example #11
Source File: neural_doodle.py From pCVR with Apache License 2.0

def region_style_loss(style_image, target_image, style_mask, target_mask):
    '''Calculate style loss between style_image and target_image,
    for one common region specified by their (boolean) masks.
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 2 == K.ndim(style_mask) == K.ndim(target_mask)
    if K.image_data_format() == 'channels_first':
        masked_style = style_image * style_mask
        masked_target = target_image * target_mask
        num_channels = K.shape(style_image)[0]
    else:
        masked_style = K.permute_dimensions(
            style_image, (2, 0, 1)) * style_mask
        masked_target = K.permute_dimensions(
            target_image, (2, 0, 1)) * target_mask
        num_channels = K.shape(style_image)[-1]
    s = gram_matrix(masked_style) / K.mean(style_mask) / num_channels
    c = gram_matrix(masked_target) / K.mean(target_mask) / num_channels
    return K.mean(K.square(s - c))
Example #12
Source File: neural_networks.py From Quora with MIT License

def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs,
                           (batch_size, input_num_capsule,
                            self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
    # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(tf.keras.backend.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = tf.keras.backend.batch_dot(outputs, u_hat_vecs, [2, 3])

    return outputs
Example #13
Source File: region.py From keras-yolo with MIT License

def _process_input(self, x):
    """Apply logistic and softmax activations to input tensor.
    """
    logistic_activate = lambda x: 1.0 / (1.0 + K.exp(-x))

    (batch, w, h, channels) = x.get_shape()
    x_temp = K.permute_dimensions(x, (3, 0, 1, 2))
    x_t = []
    for i in range(self.num):
        k = self._entry_index(i, 0)
        x_t.extend([
            logistic_activate(K.gather(x_temp, (k, k + 1))),  # 0
            K.gather(x_temp, (k + 2, k + 3))])
        if self.background:
            x_t.append(K.gather(x_temp, (k + 4,)))
        else:
            x_t.append(logistic_activate(K.gather(x_temp, (k + 4,))))

        x_t.append(
            softmax(
                K.gather(x_temp, tuple(range(k + 5, k + self.coords + self.classes + 1))),
                axis=0))
    x_t = K.concatenate(x_t, axis=0)
    return K.permute_dimensions(x_t, (1, 2, 3, 0))
Example #14
Source File: customlayers.py From cnn_evaluation_smoke with GNU General Public License v3.0

def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """This is the function used for cross-channel normalization
    in the original AlexNet.
    """
    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        extra_channels = K.spatial_2d_padding(
            K.permute_dimensions(square, (0, 2, 3, 1)))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
Example #15
Source File: submission_v50.py From Quora with MIT License

def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs,
                           (batch_size, input_num_capsule,
                            self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
    # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(tf.keras.backend.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = tf.keras.backend.batch_dot(outputs, u_hat_vecs, [2, 3])

    return outputs
Example #16
Source File: losses.py From voxelmorph with GNU General Public License v3.0

def _diffs(self, y):
    vol_shape = y.get_shape().as_list()[1:-1]
    ndims = len(vol_shape)

    df = [None] * ndims
    for i in range(ndims):
        d = i + 1
        # permute dimensions to put the ith dimension first
        r = [d, *range(d), *range(d + 1, ndims + 2)]
        y = K.permute_dimensions(y, r)
        dfi = y[1:, ...] - y[:-1, ...]

        # permute back
        # note: this might not be necessary for this loss specifically,
        # since the results are just summed over anyway.
        r = [*range(1, d + 1), 0, *range(d + 1, ndims + 2)]
        df[i] = K.permute_dimensions(dfi, r)

    return df
Example #17
Source File: losses.py From voxelmorph with GNU General Public License v3.0

def prec_loss(self, y_pred):
    """
    a more manual implementation of the precision matrix term
            mu * P * mu    where    P = D - A
    where D is the degree matrix and A is the adjacency matrix
            mu * P * mu = 0.5 * sum_i mu_i sum_j (mu_i - mu_j)
                        = 0.5 * sum_i,j (mu_i - mu_j) ^ 2
    where j are neighbors of i

    Note: could probably do with a difference filter, but the edges would
    be complicated unless tensorflow allowed for edge copying
    """
    vol_shape = y_pred.get_shape().as_list()[1:-1]
    ndims = len(vol_shape)

    sm = 0
    for i in range(ndims):
        d = i + 1
        # permute dimensions to put the ith dimension first
        r = [d, *range(d), *range(d + 1, ndims + 2)]
        y = K.permute_dimensions(y_pred, r)
        df = y[1:, ...] - y[:-1, ...]
        sm += K.mean(df * df)

    return 0.5 * sm / ndims
Example #18
Source File: mrf.py From image-analogies with MIT License

def make_patches_grid(x, patch_size, patch_stride):
    '''Break image `x` up into a grid of patches.

    input shape: (channels, rows, cols)
    output shape: (rows, cols, channels, patch_rows, patch_cols)
    '''
    from theano.tensor.nnet.neighbours import images2neibs  # TODO: all K, no T
    x = K.expand_dims(x, 0)
    xs = K.shape(x)
    num_rows = 1 + (xs[-2] - patch_size) // patch_stride
    num_cols = 1 + (xs[-1] - patch_size) // patch_stride
    num_channels = xs[-3]
    patches = images2neibs(
        x, (patch_size, patch_size), (patch_stride, patch_stride), mode='valid')
    # neibs are sorted per-channel
    patches = K.reshape(patches,
                        (num_channels, K.shape(patches)[0] // num_channels,
                         patch_size, patch_size))
    patches = K.permute_dimensions(patches, (1, 0, 2, 3))
    # arrange in a 2d-grid (rows, cols, channels, px, py)
    patches = K.reshape(patches,
                        (num_rows, num_cols, num_channels, patch_size, patch_size))
    patches_norm = K.sqrt(
        K.sum(K.square(patches), axis=(2, 3, 4), keepdims=True))
    return patches, patches_norm
Example #19
Source File: tensorflow_backend.py From keras-contrib with MIT License

def extract_image_patches(x, ksizes, ssizes, padding='same',
                          data_format='channels_last'):
    """Extract the patches from an image.

    # Arguments
        x: The input image
        ksizes: 2-d tuple with the kernel size
        ssizes: 2-d tuple with the strides size
        padding: 'same' or 'valid'
        data_format: 'channels_last' or 'channels_first'

    # Returns
        The (k_w, k_h) patches extracted
        TF ==> (batch_size, w, h, k_w, k_h, c)
        TH ==> (batch_size, w, h, c, k_w, k_h)
    """
    kernel = [1, ksizes[0], ksizes[1], 1]
    strides = [1, ssizes[0], ssizes[1], 1]
    padding = _preprocess_padding(padding)
    if data_format == 'channels_first':
        x = K.permute_dimensions(x, (0, 2, 3, 1))
    bs_i, w_i, h_i, ch_i = K.int_shape(x)
    patches = tf.extract_image_patches(x, kernel, strides, [1, 1, 1, 1], padding)
    # Reshaping to fit Theano
    bs, w, h, ch = K.int_shape(patches)
    reshaped = tf.reshape(patches, [-1, w, h, tf.floordiv(ch, ch_i), ch_i])
    final_shape = [-1, w, h, ch_i, ksizes[0], ksizes[1]]
    patches = tf.reshape(tf.transpose(reshaped, [0, 1, 2, 4, 3]), final_shape)
    if data_format == 'channels_last':
        patches = K.permute_dimensions(patches, [0, 1, 2, 4, 5, 3])
    return patches
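A minimal usage sketch for the helper above (assuming a TensorFlow 1.x environment, where tf.extract_image_patches exists; the shapes are made up for illustration):

import numpy as np

img = K.variable(np.random.rand(4, 32, 32, 3))  # batch of 32x32 RGB images
patches = extract_image_patches(img, (5, 5), (1, 1),
                                padding='same',
                                data_format='channels_last')
# channels_last layout: (batch, w, h, k_w, k_h, c)
print(K.int_shape(patches))  # (None, 32, 32, 5, 5, 3); batch dim inferred at run time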
Example #20
Source File: cifar10_cnn_capsule.py From DeepLearning_Wavelet-LSTM with MIT License

def call(self, inputs):
    """Following the routing algorithm from Hinton's paper,
    but replace b = b + <u,v> with b = <u,v>.

    This change can improve the feature representation of Capsule.

    However, you can replace
        b = K.batch_dot(outputs, hat_inputs, [2, 3])
    with
        b += K.batch_dot(outputs, hat_inputs, [2, 3])
    to realize a standard routing.
    """
    if self.share_weights:
        hat_inputs = K.conv1d(inputs, self.kernel)
    else:
        hat_inputs = K.local_conv1d(inputs, self.kernel, [1], [1])

    batch_size = K.shape(inputs)[0]
    input_num_capsule = K.shape(inputs)[1]
    hat_inputs = K.reshape(hat_inputs,
                           (batch_size, input_num_capsule,
                            self.num_capsule, self.dim_capsule))
    hat_inputs = K.permute_dimensions(hat_inputs, (0, 2, 1, 3))

    b = K.zeros_like(hat_inputs[:, :, :, 0])
    for i in range(self.routings):
        c = softmax(b, 1)
        o = self.activation(K.batch_dot(c, hat_inputs, [2, 2]))
        # The Theano backend's batch_dot keeps an extra axis, so reduce it
        # right after each batch_dot. (The published example summed `o`
        # before it was first assigned, which raises an UnboundLocalError;
        # the reductions are reordered here to fix that.)
        if K.backend() == 'theano':
            o = K.sum(o, axis=1)
        if i < self.routings - 1:
            b = K.batch_dot(o, hat_inputs, [2, 3])
            if K.backend() == 'theano':
                b = K.sum(b, axis=1)

    return o
Example #21
Source File: gaussian_prior.py From sam with MIT License

def call(self, x, mask=None):
    mu_x = self.W[:self.nb_gaussian]
    mu_y = self.W[self.nb_gaussian:self.nb_gaussian * 2]
    sigma_x = self.W[self.nb_gaussian * 2:self.nb_gaussian * 3]
    sigma_y = self.W[self.nb_gaussian * 3:]

    self.b_s = x.shape[0]
    self.height = x.shape[2]
    self.width = x.shape[3]

    e = self.height / self.width
    e1 = (1 - e) / 2
    e2 = e1 + e

    mu_x = K.clip(mu_x, 0.25, 0.75)
    mu_y = K.clip(mu_y, 0.35, 0.65)

    sigma_x = K.clip(sigma_x, 0.1, 0.9)
    sigma_y = K.clip(sigma_y, 0.2, 0.8)

    x_t = T.dot(T.ones((self.height, 1)),
                self._linspace(0, 1.0, self.width).dimshuffle('x', 0))
    y_t = T.dot(self._linspace(e1, e2, self.height).dimshuffle(0, 'x'),
                T.ones((1, self.width)))

    x_t = K.repeat_elements(K.expand_dims(x_t, dim=-1), self.nb_gaussian, axis=-1)
    y_t = K.repeat_elements(K.expand_dims(y_t, dim=-1), self.nb_gaussian, axis=-1)

    gaussian = 1 / (2 * np.pi * sigma_x * sigma_y + K.epsilon()) * \
        T.exp(-((x_t - mu_x) ** 2 / (2 * sigma_x ** 2 + K.epsilon()) +
                (y_t - mu_y) ** 2 / (2 * sigma_y ** 2 + K.epsilon())))

    gaussian = K.permute_dimensions(gaussian, (2, 0, 1))
    max_gauss = K.repeat_elements(
        K.expand_dims(
            K.repeat_elements(
                K.expand_dims(K.max(K.max(gaussian, axis=1), axis=1)),
                self.height, axis=-1)),
        self.width, axis=-1)
    gaussian = gaussian / max_gauss

    output = K.repeat_elements(K.expand_dims(gaussian, dim=0), self.b_s, axis=0)

    return output
Example #22
Source File: transform_rnn.py From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License

def _transform_trans(theta, input):
    batch1, step1, dim1 = input.shape
    input = K.reshape(input, (batch1, step1, dim1 // 3, 3))
    input = K.reshape(input, (batch1 * step1, dim1 // 3, 3))
    input = K.permute_dimensions(input, [0, 2, 1])
    add = T.ones((batch1 * step1, 1, dim1 // 3))
    input = K.concatenate([input, add], axis=1)
    output = K.batch_dot(theta, input)
    output = K.permute_dimensions(output, [0, 2, 1])
    output = K.reshape(output, (output.shape[0], dim1))
    output = K.reshape(output, (batch1, step1, output.shape[1]))
    return output