Python keras.backend.image_data_format() Examples
The following are 30 code examples of keras.backend.image_data_format(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.backend.
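As a quick, minimal sketch of what the function itself does (assuming a standard Keras install; the value is read from the "image_data_format" key of ~/.keras/keras.json):

from keras import backend as K

# image_data_format() returns either 'channels_last' (the TensorFlow default)
# or 'channels_first', depending on the Keras configuration.
fmt = K.image_data_format()

# The most common use, as the examples below show, is picking the channel axis
# for layers such as BatchNormalization or concatenate.
channel_axis = 1 if fmt == 'channels_first' else -1
print(fmt, channel_axis)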
Example #1
Source File: conv_featuremaps_visualization.py From MCF-3D-CNN with MIT License
def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + K.epsilon())
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_data_format() == 'channels_first':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x
Example #2
Source File: squeezenet.py From Deep-Learning-with-TensorFlow-Second-Edition with MIT License
def fire_module(x, fire_id, squeeze=16, expand=64):
    # sq1x1, exp1x1, exp3x3 and relu are module-level layer-name constants
    # defined elsewhere in the source file.
    s_id = 'fire' + str(fire_id) + '/'

    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3

    x = Conv2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)
    x = Activation('relu', name=s_id + relu + sq1x1)(x)

    left = Conv2D(expand, (1, 1), padding='valid', name=s_id + exp1x1)(x)
    left = Activation('relu', name=s_id + relu + exp1x1)(left)

    right = Conv2D(expand, (3, 3), padding='same', name=s_id + exp3x3)(x)
    right = Activation('relu', name=s_id + relu + exp3x3)(right)

    x = concatenate([left, right], axis=channel_axis, name=s_id + 'concat')
    return x


# Original SqueezeNet from paper.
Example #3
Source File: se_resnext.py From keras-squeeze-excite-network with MIT License
def __initial_conv_block_inception(input_tensor, weight_decay=5e-4):
    """ Adds an initial conv block, with batch norm and relu for the inception resnext
    Args:
        input_tensor: input Keras tensor
        weight_decay: weight decay factor
    Returns: a Keras tensor
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(64, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input_tensor)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x
Example #4
Source File: dual_path_network.py From Keras-DualPathNetworks with Apache License 2.0
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the DPN
    Args:
        input: input tensor
        initial_conv_filters: number of filters for initial conv block
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(initial_conv_filters, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x
Example #5
Source File: dual_path_network.py From Keras-DualPathNetworks with Apache License 2.0
def _bn_relu_conv_block(input, filters, kernel=(3, 3), stride=(1, 1), weight_decay=5e-4):
    ''' Adds a Batchnorm-Relu-Conv block for DPN
    Args:
        input: input tensor
        filters: number of output filters
        kernel: convolution kernel size
        stride: stride of convolution
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(filters, kernel, padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=stride)(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    return x
Example #6
Source File: my_image.py From MachineLearning with Apache License 2.0
def __init__(self, file, image_size, image_data_generator,
             batch_size=32, shuffle=False, seed=None,
             data_format=None,
             save_to_dir=None, save_prefix='', save_format='png'):
    if not os.path.exists(file):
        raise ValueError('Cannot find file: %s' % file)
    if data_format is None:
        data_format = K.image_data_format()

    split_lines = [line.rstrip('\n').split(' ') for line in open(file, 'r')]
    self.x = np.asarray([e[0] for e in split_lines])
    self.y = np.asarray([float(e[1]) for e in split_lines])
    self.image_size = image_size

    self.image_data_generator = image_data_generator
    self.data_format = data_format
    self.save_to_dir = save_to_dir
    self.save_prefix = save_prefix
    self.save_format = save_format

    super(FileIterator, self).__init__(self.x.shape[0], batch_size, shuffle, seed)
Example #7
Source File: gc_densenet.py From keras-global-context-networks with MIT License
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D with optional compression, and AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, avgpool
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # global context block
    x = global_context_block(x)

    return x
Example #8
Source File: pspnet.py From keras-image-segmentation with MIT License
def __init__(self, target_shape=None, factor=None, data_format=None, **kwargs):
    # compute data format
    if data_format is None:
        data_format = K.image_data_format()
    assert data_format in {'channels_last', 'channels_first'}
    self.data_format = data_format
    self.input_spec = [InputSpec(ndim=4)]
    self.target_shape = target_shape
    self.factor = factor
    if self.data_format == 'channels_first':
        self.target_size = (target_shape[2], target_shape[3])
    elif self.data_format == 'channels_last':
        self.target_size = (target_shape[1], target_shape[2])
    super(BilinearUpSampling2D, self).__init__(**kwargs)
Example #9
Source File: utils.py From imgclsmob with MIT License
def get_data_generator(data_iterator, num_classes):

    def get_arrays(db):
        data = db.data[0].asnumpy()
        if K.image_data_format() == "channels_last":
            data = data.transpose((0, 2, 3, 1))
        labels = to_categorical(
            y=db.label[0].asnumpy(),
            num_classes=num_classes)
        return data, labels

    while True:
        try:
            db = data_iterator.next()
        except StopIteration:
            # logging.warning("get_data exception due to end of data - resetting iterator")
            data_iterator.reset()
            db = data_iterator.next()
        finally:
            yield get_arrays(db)
Example #10
Source File: utils.py From deep_complex_networks with MIT License
def get_realpart(x):
    image_format = K.image_data_format()
    ndim = K.ndim(x)
    input_shape = K.shape(x)

    if (image_format == 'channels_first' and ndim != 3) or ndim == 2:
        input_dim = input_shape[1] // 2
        return x[:, :input_dim]

    input_dim = input_shape[-1] // 2
    if ndim == 3:
        return x[:, :, :input_dim]
    elif ndim == 4:
        return x[:, :, :, :input_dim]
    elif ndim == 5:
        return x[:, :, :, :, :input_dim]
Example #11
Source File: utils.py From deep_complex_networks with MIT License
def get_imagpart(x):
    image_format = K.image_data_format()
    ndim = K.ndim(x)
    input_shape = K.shape(x)

    if (image_format == 'channels_first' and ndim != 3) or ndim == 2:
        input_dim = input_shape[1] // 2
        return x[:, input_dim:]

    input_dim = input_shape[-1] // 2
    if ndim == 3:
        return x[:, :, input_dim:]
    elif ndim == 4:
        return x[:, :, :, input_dim:]
    elif ndim == 5:
        return x[:, :, :, :, input_dim:]
Example #12
Source File: score.py From keras-fcn with MIT License
def compute_error_matrix(y_true, y_pred):
    """Compute confusion matrix (a.k.a. error matrix).

    a           predicted
    c           0   1   2
    t    0  [[  5,  3,  0],
    u    1   [  2,  3,  1],
    a    2   [  0,  2, 11]]
    l

    Note: true positives are on the diagonal.
    """
    # Find channel axis given backend
    if K.image_data_format() == 'channels_last':
        ax_chn = 3
    else:
        ax_chn = 1
    classes = y_true.shape[ax_chn]
    confusion = get_confusion(K.argmax(y_true, axis=ax_chn).flatten(),
                              K.argmax(y_pred, axis=ax_chn).flatten(),
                              classes)
    return confusion
Example #13
Source File: hlnet.py From Face-skin-hair-segmentaiton-and-skin-color-evaluation with Apache License 2.0
def _conv_block(inputs, filters, kernel, strides=1, padding='same', use_activation=False):
    """Convolution Block
    This function defines a 2D convolution operation with BN and relu.

    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.

    # Returns
        Output tensor.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(filters, kernel, padding=padding, strides=strides, use_bias=False)(inputs)
    x = BatchNormalization(axis=channel_axis)(x)
    if use_activation:
        x = Activation('relu')(x)
    return x
Example #14
Source File: depthwise_conv2d.py From keras-mobilenet with MIT License
def call(self, inputs):
    # resolve the data format before validating it (the original assigned a
    # local variable that was never used)
    if self.data_format is None:
        self.data_format = image_data_format()
    if self.data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ' + str(self.data_format))

    x = _preprocess_conv2d_input(inputs, self.data_format)
    padding = _preprocess_padding(self.padding)
    strides = (1,) + self.strides + (1,)

    # convolve the preprocessed input, not the raw one
    outputs = tf.nn.depthwise_conv2d(x, self.depthwise_kernel,
                                     strides=strides,
                                     padding=padding,
                                     rate=self.dilation_rate)

    if self.bias:
        outputs = K.bias_add(
            outputs,
            self.bias,
            data_format=self.data_format)

    if self.activation is not None:
        return self.activation(outputs)
    return outputs
Example #15
Source File: se_resnet.py From keras-squeeze-excite-network with MIT License
def _resnet_bottleneck_block(input_tensor, filters, k=1, strides=(1, 1)):
    """ Adds a pre-activation resnet block with bottleneck layers
    Args:
        input_tensor: input Keras tensor
        filters: number of output filters
        k: width factor
        strides: strides of the convolution layer
    Returns: a Keras tensor
    """
    init = input_tensor
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    bottleneck_expand = 4

    x = BatchNormalization(axis=channel_axis)(input_tensor)
    x = Activation('relu')(x)

    if strides != (1, 1) or _tensor_shape(init)[channel_axis] != bottleneck_expand * filters * k:
        init = Conv2D(bottleneck_expand * filters * k, (1, 1), padding='same', kernel_initializer='he_normal',
                      use_bias=False, strides=strides)(x)

    x = Conv2D(filters * k, (1, 1), padding='same', kernel_initializer='he_normal',
               use_bias=False)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = Conv2D(filters * k, (3, 3), padding='same', kernel_initializer='he_normal',
               use_bias=False, strides=strides)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = Conv2D(bottleneck_expand * filters * k, (1, 1), padding='same', kernel_initializer='he_normal',
               use_bias=False)(x)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    m = add([x, init])
    return m
Example #16
Source File: model_store.py From imgclsmob with MIT License
def load_model(net, file_path, skip_mismatch=False):
    """
    Load model state dictionary from a file.

    Parameters
    ----------
    net : Model
        Network in which weights are loaded.
    file_path : str
        Path to the file.
    skip_mismatch : bool, default False
        Whether to skip loading of layers with wrong names.
    """
    # if (K.backend() == "mxnet") and (K.image_data_format() == "channels_first"):
    #     net.load_weights(filepath=file_path, by_name=skip_mismatch)
    #     return
    with h5py.File(file_path, mode='r') as f:
        if ("layer_names" not in f.attrs) and ("model_weights" in f):
            f = f["model_weights"]
        if ("keras_version" not in f.attrs) or ("backend" not in f.attrs):
            raise ImportError("Unsupported version of Keras checkpoint file.")
        # original_keras_version = f.attrs["keras_version"].decode("utf8")
        original_backend = f.attrs["backend"].decode("utf8")
        assert (original_backend == "mxnet")
        if skip_mismatch:
            _load_weights_from_hdf5_group_by_name(f=f, layers=net.layers)
        else:
            _load_weights_from_hdf5_group(f=f, layers=net.layers)
Example #17
Source File: common.py From imgclsmob with MIT License
def is_channels_first():
    """
    Is the data format channels-first?

    Returns
    -------
    bool
        A flag.
    """
    return K.image_data_format() == "channels_first"
Example #18
Source File: model_store.py From imgclsmob with MIT License
def _preprocess_weights_for_loading(layer, weights):
    """
    Converts layers weights.

    Parameters
    ----------
    layer : Layer
        Layer instance.
    weights : list of np.array
        List of weights values.

    Returns
    -------
    list of np.array
        A list of weights values.
    """
    is_channels_first = (K.image_data_format() == "channels_first")
    if ((K.backend() == "mxnet") and (not is_channels_first)) or (K.backend() == "tensorflow"):
        if layer.__class__.__name__ == "Conv2D":
            weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
        elif layer.__class__.__name__ == "DepthwiseConv2D":
            weights[0] = np.transpose(weights[0], (2, 3, 0, 1))
    for i in range(len(weights)):
        assert (K.int_shape(layer.weights[i]) == weights[i].shape)
    return weights
Example #19
Source File: utils.py From deep_complex_networks with MIT License
def getpart_output_shape(input_shape):
    returned_shape = list(input_shape[:])
    image_format = K.image_data_format()
    ndim = len(returned_shape)

    if (image_format == 'channels_first' and ndim != 3) or ndim == 2:
        axis = 1
    else:
        axis = -1

    returned_shape[axis] = returned_shape[axis] // 2

    return tuple(returned_shape)
Example #20
Source File: gc_densenet.py From keras-global-context-networks with MIT License
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None,
                  weight_decay=1e-4, grow_nb_filters=True, return_concat_list=False):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        bottleneck: bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: flag to decide to allow number of filters to grow
        return_concat_list: return the list of feature maps along with the actual output
    Returns: keras tensor with nb_layers of conv_block appended
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_list = [x]

    for i in range(nb_layers):
        cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(cb)

        x = concatenate([x, cb], axis=concat_axis)

        if grow_nb_filters:
            nb_filter += growth_rate

    # global context block
    x = global_context_block(x)

    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter
Example #21
Source File: se_inception_resnet_v2.py From keras-squeeze-excite-network with MIT License
def conv2d_bn(x, filters, kernel_size, strides=1, padding='same', activation='relu',
              use_bias=False, name=None):
    """Utility function to apply conv + BN.

    # Arguments
        x: input keras tensor.
        filters: filters in `Conv2D`.
        kernel_size: kernel size as in `Conv2D`.
        padding: padding mode in `Conv2D`.
        activation: activation in `Conv2D`.
        strides: strides in `Conv2D`.
        name: name of the ops; will become `name + '_ac'` for the activation
            and `name + '_bn'` for the batch norm layer.

    # Returns
        Output tensor after applying `Conv2D` and `BatchNormalization`.
    """
    x = Conv2D(filters,
               kernel_size,
               strides=strides,
               padding=padding,
               use_bias=use_bias,
               name=name)(x)
    if not use_bias:
        bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
        bn_name = None if name is None else '{name}_bn'.format(name=name)
        x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    if activation is not None:
        ac_name = None if name is None else '{name}_ac'.format(name=name)
        x = Activation(activation, name=ac_name)(x)
    return x
Example #22
Source File: pool.py From deep_complex_networks with MIT License
def call(self, x, mask=None):
    xshape = x._keras_shape
    if hasattr(self, "topf"):
        topf = self.topf
    else:
        if KB.image_data_format() == "channels_first":
            topf = (int(self.gamma[0] * xshape[2]),)
        else:
            topf = (int(self.gamma[0] * xshape[1]),)

    if KB.image_data_format() == "channels_first":
        if topf[0] > 0 and xshape[2] >= 2 * topf[0]:
            mask = [1] * (topf[0]) + \
                   [0] * (xshape[2] - 2 * topf[0]) + \
                   [1] * (topf[0])
            mask = [[mask]]
            mask = np.asarray(mask, dtype=KB.floatx()).transpose((0, 1, 2))
            mask = KB.constant(mask)
            x *= mask
    else:
        if topf[0] > 0 and xshape[1] >= 2 * topf[0]:
            mask = [1] * (topf[0]) + \
                   [0] * (xshape[1] - 2 * topf[0]) + \
                   [1] * (topf[0])
            mask = [[mask]]
            mask = np.asarray(mask, dtype=KB.floatx()).transpose((0, 2, 1))
            mask = KB.constant(mask)
            x *= mask

    return x
Example #23
Source File: se_resnext.py From keras-squeeze-excite-network with MIT License
def __initial_conv_block(input_tensor, weight_decay=5e-4):
    """ Adds an initial convolution block, with batch normalization and relu activation
    Args:
        input_tensor: input Keras tensor
        weight_decay: weight decay factor
    Returns: a Keras tensor
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(64, (3, 3), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(input_tensor)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    return x
Example #24
Source File: se.py From keras-squeeze-excite-network with MIT License
def squeeze_excite_block(input_tensor, ratio=16):
    """ Create a channel-wise squeeze-excite block

    Args:
        input_tensor: input Keras tensor
        ratio: reduction ratio for the squeeze step

    Returns: a Keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    """
    init = input_tensor
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = _tensor_shape(init)[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x
Example #25
Source File: gc.py From keras-global-context-networks with MIT License
def _spatial_flattenND(ip, rank):
    assert rank in [3, 4, 5], "Rank of input must be 3, 4 or 5"

    ip_shape = K.int_shape(ip)
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

    if rank == 3:
        x = ip  # identity op for rank 3
    elif rank == 4:
        if channel_dim == 1:
            # [C, D1, D2] -> [C, D1 * D2]
            shape = [ip_shape[1], ip_shape[2] * ip_shape[3]]
        else:
            # [D1, D2, C] -> [D1 * D2, C]
            shape = [ip_shape[1] * ip_shape[2], ip_shape[3]]
        x = Reshape(shape)(ip)
    else:
        if channel_dim == 1:
            # [C, D1, D2, D3] -> [C, D1 * D2 * D3]
            shape = [ip_shape[1], ip_shape[2] * ip_shape[3] * ip_shape[4]]
        else:
            # [D1, D2, D3, C] -> [D1 * D2 * D3, C]
            shape = [ip_shape[1] * ip_shape[2] * ip_shape[3], ip_shape[4]]
        x = Reshape(shape)(ip)

    return x
Example #26
Source File: se_resnet.py From keras-squeeze-excite-network with MIT License
def _resnet_block(input_tensor, filters, k=1, strides=(1, 1)):
    """ Adds a pre-activation resnet block without bottleneck layers
    Args:
        input_tensor: input Keras tensor
        filters: number of output filters
        k: width factor
        strides: strides of the convolution layer
    Returns: a Keras tensor
    """
    init = input_tensor
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    x = BatchNormalization(axis=channel_axis)(input_tensor)
    x = Activation('relu')(x)

    if strides != (1, 1) or _tensor_shape(init)[channel_axis] != filters * k:
        init = Conv2D(filters * k, (1, 1), padding='same', kernel_initializer='he_normal',
                      use_bias=False, strides=strides)(x)

    x = Conv2D(filters * k, (3, 3), padding='same', kernel_initializer='he_normal',
               use_bias=False, strides=strides)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = Conv2D(filters * k, (3, 3), padding='same', kernel_initializer='he_normal',
               use_bias=False)(x)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    m = add([x, init])
    return m
Example #27
Source File: se_densenet.py From keras-squeeze-excite-network with MIT License
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None,
                  weight_decay=1e-4, grow_nb_filters=True, return_concat_list=False):
    """ Build a dense_block where the output of each conv_block is fed to subsequent ones
    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        bottleneck: bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: flag to decide to allow number of filters to grow
        return_concat_list: return the list of feature maps along with the actual output
    Returns: keras tensor with nb_layers of conv_block appended
    """
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_list = [x]

    for i in range(nb_layers):
        cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(cb)

        x = concatenate([x, cb], axis=concat_axis)

        if grow_nb_filters:
            nb_filter += growth_rate

    # squeeze and excite block
    x = squeeze_excite_block(x)

    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter
Example #28
Source File: se_densenet.py From keras-squeeze-excite-network with MIT License
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
    """ Apply BatchNorm, Relu, 3x3 Conv2D, optional bottleneck block and dropout
    Args:
        ip: Input keras tensor
        nb_filter: number of filters
        bottleneck: add bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)
    """
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)

    if bottleneck:
        inter_channel = nb_filter * 4  # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua

        x = Conv2D(inter_channel, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
                   kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

    x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same', use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
Example #29
Source File: se_densenet.py From keras-squeeze-excite-network with MIT License
def preprocess_input(x, data_format=None):
    """Preprocesses an input tensor encoding a batch of images.

    # Arguments
        x: 4D numpy input
        data_format: data format of the image tensor.

    # Returns
        Preprocessed tensor.
    """
    if data_format is None:
        data_format = K.image_data_format()
    assert data_format in {'channels_last', 'channels_first'}

    if data_format == 'channels_first':
        if x.ndim == 3:
            # 'RGB'->'BGR'
            x = x[::-1, ...]
            # Zero-center by mean pixel
            x[0, :, :] -= 103.939
            x[1, :, :] -= 116.779
            x[2, :, :] -= 123.68
        else:
            x = x[:, ::-1, ...]
            x[:, 0, :, :] -= 103.939
            x[:, 1, :, :] -= 116.779
            x[:, 2, :, :] -= 123.68
    else:
        # 'RGB'->'BGR'
        x = x[..., ::-1]
        # Zero-center by mean pixel
        x[..., 0] -= 103.939
        x[..., 1] -= 116.779
        x[..., 2] -= 123.68

    x *= 0.017  # scale values

    return x
Example #30
Source File: pspnet.py From keras-image-segmentation with MIT License
def pyramid_pooling_module(x, num_filters=512, input_shape=(512, 512, 3), output_stride=16, levels=[6, 3, 2, 1]):
    # compute data format
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    pyramid_pooling_blocks = [x]
    for level in levels:
        pyramid_pooling_blocks.append(
            interp_block(
                x,
                num_filters=num_filters,
                level=level,
                input_shape=input_shape,
                output_stride=output_stride))

    y = concatenate(pyramid_pooling_blocks)
    # y = merge(pyramid_pooling_blocks, mode='concat', concat_axis=3)
    y = _conv(
        filters=num_filters,
        kernel_size=(3, 3),
        padding='same',
        block='pyramid_out_%s' % output_stride)(y)
    y = BatchNormalization(axis=bn_axis, name='bn_pyramid_out_%s' % output_stride)(y)
    y = Activation('relu')(y)

    return y