Python keras.layers.Reshape() Examples
The following are 30 code examples of keras.layers.Reshape(), drawn from open-source projects. Each example notes its source file, project, and license. You may also want to check out all available functions/classes of the module keras.layers.
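Before the project examples, here is a minimal, self-contained sketch of the layer itself (the 100-dim latent size and 7x7x128 target shape are illustrative choices, not from any of the projects below). Reshape takes a target_shape that excludes the batch dimension and rearranges each sample without changing its data; one dimension may be -1 to be inferred.

import numpy as np
from keras.layers import Dense, Input, Reshape
from keras.models import Model

# Project a 100-dim latent vector to 7*7*128 features, then let Reshape
# turn the flat vector into a 7x7x128 feature map. target_shape excludes
# the batch dimension.
latent = Input(shape=(100,))
x = Dense(7 * 7 * 128, activation="relu")(latent)
x = Reshape((7, 7, 128))(x)
model = Model(latent, x)

noise = np.random.normal(size=(4, 100)).astype("float32")
print(model.predict(noise).shape)  # -> (4, 7, 7, 128)

This Dense-then-Reshape idiom is exactly what most of the GAN generators below use to go from a latent vector to an image-shaped tensor.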
Example #1
Source File: sgan.py From Keras-GAN with MIT License
def build_generator(self):

    model = Sequential()

    # Project the latent vector to 7*7*128 features, then reshape to a 7x7x128 map.
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(1, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #2
Source File: infogan.py From Keras-GAN with MIT License
def build_generator(self):

    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
    model.add(Activation("tanh"))

    gen_input = Input(shape=(self.latent_dim,))
    img = model(gen_input)

    model.summary()

    return Model(gen_input, img)
Example #3
Source File: wgan.py From Keras-GAN with MIT License
def build_generator(self):

    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #4
Source File: lsgan.py From Keras-GAN with MIT License
def build_generator(self):

    model = Sequential()

    model.add(Dense(256, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    # Emit a flat pixel vector, then reshape it back to the image dimensions.
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #5
Source File: bgan.py From Keras-GAN with MIT License
def build_generator(self):

    model = Sequential()

    model.add(Dense(256, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #6
Source File: dcgan.py From Keras-GAN with MIT License
def build_generator(self):

    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #7
Source File: gan.py From Keras-GAN with MIT License
def build_generator(self):

    model = Sequential()

    model.add(Dense(256, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #8
Source File: generator.py From Generative-Adversarial-Networks-Cookbook with MIT License
def dc_model(self):
    model = Sequential()

    model.add(Dense(256 * 8 * 8, activation=LeakyReLU(0.2),
                    input_dim=self.LATENT_SPACE_SIZE))
    model.add(BatchNormalization())
    model.add(Reshape((8, 8, 256)))
    model.add(UpSampling2D())
    # Keras 2 API: Conv2D(filters, kernel_size, padding=...) replaces the
    # legacy Convolution2D(filters, rows, cols, border_mode=...) calls in
    # the original, which fail under Keras 2.
    model.add(Conv2D(128, (5, 5), padding='same', activation=LeakyReLU(0.2)))
    model.add(BatchNormalization())
    model.add(UpSampling2D())
    model.add(Conv2D(64, (5, 5), padding='same', activation=LeakyReLU(0.2)))
    model.add(BatchNormalization())
    model.add(UpSampling2D())
    model.add(Conv2D(self.C, (5, 5), padding='same', activation='tanh'))

    return model
Example #9
Source File: model.py From EasyPR-python with Apache License 2.0
def call(self, inputs):
    def wrapper(rois, mrcnn_class, mrcnn_bbox, image_meta):
        # currently supports one image per batch
        b = 0
        _, _, window, _ = parse_image_meta(image_meta)
        detections = refine_detections(
            rois[b], mrcnn_class[b], mrcnn_bbox[b], window[b], self.config)

        # Pad with zeros if detections < DETECTION_MAX_INSTANCES
        gap = self.config.DETECTION_MAX_INSTANCES - detections.shape[0]
        assert gap >= 0
        if gap > 0:
            detections = np.pad(detections, [(0, gap), (0, 0)],
                                'constant', constant_values=0)

        # Cast to float32
        # TODO: track where float64 is introduced
        detections = detections.astype(np.float32)

        # Reshape output
        # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in pixels
        return np.reshape(detections,
                          [1, self.config.DETECTION_MAX_INSTANCES, 6])

    # Return wrapped function
    return tf.py_func(wrapper, inputs, tf.float32)
Example #10
Source File: generator.py From Generative-Adversarial-Networks-Cookbook with MIT License
def model(self, block_starting_size=128, num_blocks=4):
    model = Sequential()

    block_size = block_starting_size
    model.add(Dense(block_size, input_shape=(self.LATENT_SPACE_SIZE,)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    for i in range(num_blocks - 1):
        block_size = block_size * 2
        model.add(Dense(block_size))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))

    # Flat W*H*C output reshaped back into an image tensor.
    model.add(Dense(self.W * self.H * self.C, activation='tanh'))
    model.add(Reshape((self.W, self.H, self.C)))

    return model
Example #11
Source File: FSANET_model.py From FSA-Net with Apache License 2.0
def ssr_FC_model_build(self, feat_dim, name_F):
    input_s1_pre = Input((feat_dim,))
    input_s2_pre = Input((feat_dim,))
    input_s3_pre = Input((feat_dim,))

    def _process_input(stage_index, stage_num, num_classes, input_s_pre):
        feat_delta_s = Dense(2 * num_classes, activation='tanh')(input_s_pre)
        delta_s = Dense(num_classes, activation='tanh',
                        name=f'delta_s{stage_index}')(feat_delta_s)

        feat_local_s = Dense(2 * num_classes, activation='tanh')(input_s_pre)
        local_s = Dense(units=num_classes, activation='tanh',
                        name=f'local_delta_stage{stage_index}')(feat_local_s)

        feat_pred_s = Dense(stage_num * num_classes, activation='relu')(input_s_pre)
        pred_s = Reshape((num_classes, stage_num))(feat_pred_s)

        return delta_s, local_s, pred_s

    delta_s1, local_s1, pred_s1 = _process_input(1, self.stage_num[0], self.num_classes, input_s1_pre)
    delta_s2, local_s2, pred_s2 = _process_input(2, self.stage_num[1], self.num_classes, input_s2_pre)
    delta_s3, local_s3, pred_s3 = _process_input(3, self.stage_num[2], self.num_classes, input_s3_pre)

    return Model(inputs=[input_s1_pre, input_s2_pre, input_s3_pre],
                 outputs=[pred_s1, pred_s2, pred_s3,
                          delta_s1, delta_s2, delta_s3,
                          local_s1, local_s2, local_s3],
                 name=name_F)
Example #12
Source File: FSANET_model.py From FSA-Net with Apache License 2.0
def ssr_feat_S_model_build(self, m_dim):
    input_preS = Input((self.map_xy_size, self.map_xy_size, 64))

    if self.is_varS_model:
        feat_preS = MomentsLayer()(input_preS)
    else:
        feat_preS = Conv2D(1, (1, 1), padding='same', activation='sigmoid')(input_preS)

    feat_preS = Reshape((-1,))(feat_preS)
    SR_matrix = Dense(m_dim * (self.map_xy_size * self.map_xy_size * 3),
                      activation='sigmoid')(feat_preS)
    SR_matrix = Reshape((m_dim, (self.map_xy_size * self.map_xy_size * 3)))(SR_matrix)

    return Model(inputs=input_preS, outputs=[SR_matrix, feat_preS], name='feat_S_model')
Example #13
Source File: discriminator.py From Generative-Adversarial-Networks-Cookbook with MIT License
def model(self):
    input_layer = Input(shape=self.SHAPE)

    # Keras 2 API: Conv2D(filters, kernel_size, strides=..., padding=...)
    # replaces the legacy Convolution2D(..., subsample=..., border_mode=...)
    # calls in the original.
    x = Conv2D(96, (3, 3), strides=(2, 2), padding='same', activation='relu')(input_layer)
    x = Conv2D(64, (3, 3), strides=(2, 2), padding='same', activation='relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), padding='same')(x)
    x = Conv2D(32, (3, 3), strides=(1, 1), padding='same', activation='relu')(x)
    x = Conv2D(32, (1, 1), strides=(1, 1), padding='same', activation='relu')(x)
    x = Conv2D(2, (1, 1), strides=(1, 1), padding='same', activation='relu')(x)

    # Collapse the spatial grid into a list of 2-way predictions.
    output_layer = Reshape((-1, 2))(x)

    return Model(input_layer, output_layer)
Example #14
Source File: generator.py From Generative-Adversarial-Networks-Cookbook with MIT License
def model(self, block_starting_size=128, num_blocks=4):
    model = Sequential()

    block_size = block_starting_size
    model.add(Dense(block_size, input_shape=(self.LATENT_SPACE_SIZE,)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    for i in range(num_blocks - 1):
        block_size = block_size * 2
        model.add(Dense(block_size))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(self.W * self.H * self.C, activation='tanh'))
    model.add(Reshape((self.W, self.H, self.C)))

    return model
Example #15
Source File: gc.py From keras-global-context-networks with MIT License
def _spatial_flattenND(ip, rank):
    assert rank in [3, 4, 5], "Rank of input must be 3, 4 or 5"

    ip_shape = K.int_shape(ip)
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

    if rank == 3:
        x = ip  # identity op for rank 3
    elif rank == 4:
        if channel_dim == 1:
            # [C, D1, D2] -> [C, D1 * D2]
            shape = [ip_shape[1], ip_shape[2] * ip_shape[3]]
        else:
            # [D1, D2, C] -> [D1 * D2, C]
            shape = [ip_shape[1] * ip_shape[2], ip_shape[3]]
        x = Reshape(shape)(ip)
    else:
        if channel_dim == 1:
            # [C, D1, D2, D3] -> [C, D1 * D2 * D3]
            shape = [ip_shape[1], ip_shape[2] * ip_shape[3] * ip_shape[4]]
        else:
            # [D1, D2, D3, C] -> [D1 * D2 * D3, C]
            shape = [ip_shape[1] * ip_shape[2] * ip_shape[3], ip_shape[4]]
        x = Reshape(shape)(ip)

    return x
Example #16
Source File: model.py From PanopticSegmentation with MIT License
def call(self, inputs):
    rois = inputs[0]
    mrcnn_class = inputs[1]
    mrcnn_bbox = inputs[2]
    image_meta = inputs[3]

    # Get windows of images in normalized coordinates. Windows are the area
    # in the image that excludes the padding.
    # Use the shape of the first image in the batch to normalize the window
    # because we know that all images get resized to the same size.
    m = parse_image_meta_graph(image_meta)
    image_shape = m['image_shape'][0]
    window = norm_boxes_graph(m['window'], image_shape[:2])

    # Run detection refinement graph on each item in the batch
    detections_batch = utils.batch_slice(
        [rois, mrcnn_class, mrcnn_bbox, window],
        lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
        self.config.IMAGES_PER_GPU)

    # Reshape output
    # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
    # normalized coordinates
    return tf.reshape(
        detections_batch,
        [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
Example #17
Source File: FSANET_model.py From FSA-Net with Apache License 2.0
def ssr_noS_model_build(self, **kwargs):
    input_s1_preS = Input((self.map_xy_size, self.map_xy_size, 64))
    input_s2_preS = Input((self.map_xy_size, self.map_xy_size, 64))
    input_s3_preS = Input((self.map_xy_size, self.map_xy_size, 64))

    # Flatten each spatial map into a set of 64-dim primary capsules.
    primcaps_s1 = Reshape((self.map_xy_size * self.map_xy_size, 64))(input_s1_preS)
    primcaps_s2 = Reshape((self.map_xy_size * self.map_xy_size, 64))(input_s2_preS)
    primcaps_s3 = Reshape((self.map_xy_size * self.map_xy_size, 64))(input_s3_preS)

    primcaps = Concatenate(axis=1)([primcaps_s1, primcaps_s2, primcaps_s3])

    return Model(inputs=[input_s1_preS, input_s2_preS, input_s3_preS],
                 outputs=primcaps, name='ssr_S_model')
Example #18
Source File: model.py From EasyPR-python with Apache License 2.0
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64)
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)

    # Gather the deltas (predicted and true) that contribute to loss
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)

    # Smooth-L1 Loss
    loss = K.switch(tf.size(target_bbox) > 0,
                    smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
                    tf.constant(0.0))
    loss = K.mean(loss)
    loss = K.reshape(loss, [1, 1])
    return loss
Example #19
Source File: model.py From EasyPR-python with Apache License 2.0
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of Region Proposal Network.

    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).

    Returns:
        rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    # TODO: check if stride of 2 causes alignment issues if the featuremap
    # is not even.
    # Shared convolutional base of the RPN
    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                       strides=anchor_stride, name='rpn_conv_shared')(feature_map)

    # Anchor Score. [batch, height, width, anchors per location * 2].
    x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                  activation='linear', name='rpn_class_raw')(shared)

    # Reshape to [batch, anchors, 2]
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)

    # Softmax on last dimension of BG/FG.
    rpn_probs = KL.Activation("softmax", name="rpn_class_xxx")(rpn_class_logits)

    # Bounding box refinement. [batch, H, W, anchors per location, depth]
    # where depth is [x, y, log(w), log(h)]
    x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                  activation='linear', name='rpn_bbox_pred')(shared)

    # Reshape to [batch, anchors, 4]
    rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)

    return [rpn_class_logits, rpn_probs, rpn_bbox]
Example #20
Source File: FSANET_model.py From FSA-Net with Apache License 2.0
def ssr_aggregation_model_build(self, shape_primcaps):
    input_primcaps = Input(shape_primcaps)

    agg_feat = NetVLAD(feature_size=64, max_samples=self.num_primcaps,
                       cluster_size=self.num_capsule,
                       output_dim=self.num_capsule * self.dim_capsule)(input_primcaps)
    agg_feat = Reshape((self.num_capsule, self.dim_capsule))(agg_feat)

    feat_s1_div, feat_s2_div, feat_s3_div = AggregatedFeatureExtractionLayer(
        num_capsule=self.num_capsule)(agg_feat)

    feat_s1_div = Reshape((-1,))(feat_s1_div)
    feat_s2_div = Reshape((-1,))(feat_s2_div)
    feat_s3_div = Reshape((-1,))(feat_s3_div)

    return Model(inputs=input_primcaps,
                 outputs=[feat_s1_div, feat_s2_div, feat_s3_div],
                 name='ssr_Agg_model')
Example #21
Source File: FSANET_model.py From FSA-Net with Apache License 2.0
def ssr_aggregation_model_build(self, shape_primcaps):
    input_primcaps = Input(shape_primcaps)

    metric_feat = MatMulLayer(16, type=1)(input_primcaps)
    metric_feat = MatMulLayer(3, type=2)(metric_feat)

    feat_s1_div, feat_s2_div, feat_s3_div = AggregatedFeatureExtractionLayer(
        num_capsule=self.num_capsule)(metric_feat)

    feat_s1_div = Reshape((-1,))(feat_s1_div)
    feat_s2_div = Reshape((-1,))(feat_s2_div)
    feat_s3_div = Reshape((-1,))(feat_s3_div)

    return Model(inputs=input_primcaps,
                 outputs=[feat_s1_div, feat_s2_div, feat_s3_div],
                 name='ssr_Metric_model')
Example #22
Source File: gc.py From keras-global-context-networks with MIT License
def _spatial_expandND(ip, rank):
    assert rank in [3, 4, 5], "Rank of input must be 3, 4 or 5"

    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

    if rank == 3:
        x = Permute((2, 1))(ip)  # transpose the two non-batch dims for rank 3
    elif rank == 4:
        if channel_dim == 1:
            # expand to [C, 1, 1] so it broadcasts over both spatial dims
            shape = [-1, 1, 1]
        else:
            # expand to [1, 1, C]
            shape = [1, 1, -1]
        x = Reshape(shape)(ip)
    else:
        if channel_dim == 1:
            # expand to [C, 1, 1, 1]
            shape = [-1, 1, 1, 1]
        else:
            # expand to [1, 1, 1, C]
            shape = [1, 1, 1, -1]
        x = Reshape(shape)(ip)

    return x
Example #23
Source File: FSANET_model.py From FSA-Net with Apache License 2.0
def ssr_F_model_build(self, feat_dim, name_F):
    input_s1_pre = Input((feat_dim,))
    input_s2_pre = Input((feat_dim,))
    input_s3_pre = Input((feat_dim,))

    def _process_input(stage_index, stage_num, num_classes, input_s_pre):
        feat_delta_s = FeatSliceLayer(0, 4)(input_s_pre)
        delta_s = Dense(num_classes, activation='tanh',
                        name=f'delta_s{stage_index}')(feat_delta_s)

        feat_local_s = FeatSliceLayer(4, 8)(input_s_pre)
        local_s = Dense(units=num_classes, activation='tanh',
                        name=f'local_delta_stage{stage_index}')(feat_local_s)

        feat_pred_s = FeatSliceLayer(8, 16)(input_s_pre)
        feat_pred_s = Dense(stage_num * num_classes, activation='relu')(feat_pred_s)
        pred_s = Reshape((num_classes, stage_num))(feat_pred_s)

        return delta_s, local_s, pred_s

    delta_s1, local_s1, pred_s1 = _process_input(1, self.stage_num[0], self.num_classes, input_s1_pre)
    delta_s2, local_s2, pred_s2 = _process_input(2, self.stage_num[1], self.num_classes, input_s2_pre)
    delta_s3, local_s3, pred_s3 = _process_input(3, self.stage_num[2], self.num_classes, input_s3_pre)

    return Model(inputs=[input_s1_pre, input_s2_pre, input_s3_pre],
                 outputs=[pred_s1, pred_s2, pred_s3,
                          delta_s1, delta_s2, delta_s3,
                          local_s1, local_s2, local_s3],
                 name=name_F)
Example #24
Source File: SiameseModel.py From MassImageRetrieval with Apache License 2.0
def get_Shared_Model(input_dim):
    sharedNet = Sequential()
    sharedNet.add(Dense(128, input_shape=(input_dim,), activation='relu'))
    sharedNet.add(Dropout(0.1))
    sharedNet.add(Dense(128, activation='relu'))
    sharedNet.add(Dropout(0.1))
    sharedNet.add(Dense(128, activation='relu'))
    # sharedNet.add(Dropout(0.1))
    # sharedNet.add(Dense(3, activation='relu'))

    # Alternative convolutional variant, kept commented out in the original:
    # sharedNet = Sequential()
    # sharedNet.add(Dense(4096, activation="tanh", kernel_regularizer=l2(2e-3)))
    # sharedNet.add(Reshape(target_shape=(64, 64, 1)))
    # sharedNet.add(Conv2D(filters=64, kernel_size=3, strides=(2, 2), padding="same", activation="relu", kernel_regularizer=l2(1e-3)))
    # sharedNet.add(MaxPooling2D())
    # sharedNet.add(Conv2D(filters=128, kernel_size=3, strides=(2, 2), padding="same", activation="relu", kernel_regularizer=l2(1e-3)))
    # sharedNet.add(MaxPooling2D())
    # sharedNet.add(Conv2D(filters=64, kernel_size=3, strides=(1, 1), padding="same", activation="relu", kernel_regularizer=l2(1e-3)))
    # sharedNet.add(Flatten())
    # sharedNet.add(Dense(1024, activation="sigmoid", kernel_regularizer=l2(1e-3)))
    return sharedNet
Example #25
Source File: se.py From keras-squeeze-excite-network with MIT License
def squeeze_excite_block(input_tensor, ratio=16):
    """ Create a channel-wise squeeze-excite block

    Args:
        input_tensor: input Keras tensor
        ratio: reduction ratio for the bottleneck Dense layer

    Returns: a Keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    """
    init = input_tensor
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = _tensor_shape(init)[channel_axis]
    se_shape = (1, 1, filters)

    # Global pooling yields a flat [C] descriptor; Reshape makes it broadcastable.
    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x
Example #26
Source File: se.py From keras-squeeze-excite-network with MIT License
def squeeze_excite_block(input, ratio=16):
    ''' Create a channel-wise squeeze-excite block

    Args:
        input: input tensor
        ratio: reduction ratio for the bottleneck Dense layer

    Returns: a keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = init._keras_shape[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x
Example #27
Source File: pspnet.py From keras-image-segmentation with MIT License
def _squeeze_excite_block(input, filters, k=1, name=None):
    init = input
    se_shape = (1, 1, filters * k) if K.image_data_format() == 'channels_last' else (filters * k, 1, 1)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense((filters * k) // 16, activation='relu', kernel_initializer='he_normal',
               use_bias=False, name=name + '_fc1')(se)
    se = Dense(filters * k, activation='sigmoid', kernel_initializer='he_normal',
               use_bias=False, name=name + '_fc2')(se)
    return se

# pyramid pooling function
Example #28
Source File: pspnet.py From keras-image-segmentation with MIT License
def duc(x, factor=8, output_shape=(512, 512, 1)):
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    H, W, c, r = output_shape[0], output_shape[1], output_shape[2], factor
    # Integer division: Reshape needs int dimensions (the original used '/',
    # which yields floats under Python 3).
    h = H // r
    w = W // r

    x = Conv2D(c * r * r, (3, 3), padding='same', name='conv_duc_%s' % factor)(x)
    x = BatchNormalization(axis=bn_axis, name='bn_duc_%s' % factor)(x)
    x = Activation('relu')(x)

    # Pixel-shuffle: rearrange the c*r*r channels into an r-times larger map.
    x = Permute((3, 1, 2))(x)
    x = Reshape((c, r, r, h, w))(x)
    x = Permute((1, 4, 2, 5, 3))(x)
    x = Reshape((c, H, W))(x)
    x = Permute((2, 3, 1))(x)

    return x

# interpolation
Example #29
Source File: stacking.py From PyShortTextCategorization with MIT License
def train(self, classdict, optimizer='adam', l2reg=0.01, bias_l2reg=0.01, nb_epoch=1000):
    """ Train the stacked generalization.

    :param classdict: training data
    :param optimizer: optimizer to use. Options: sgd, rmsprop, adagrad,
                      adadelta, adam, adamax, nadam. (Default: 'adam')
    :param l2reg: coefficient for L2-regularization (Default: 0.01)
    :param bias_l2reg: coefficient for L2-regularization of the bias (Default: 0.01)
    :param nb_epoch: number of epochs for training (Default: 1000)
    :return: None
    :type classdict: dict
    :type optimizer: str
    :type l2reg: float
    :type bias_l2reg: float
    :type nb_epoch: int
    """
    # register
    self.register_classifiers()
    self.register_classlabels(classdict.keys())

    kmodel = Sequential()
    # Flatten the (num_classifiers, num_labels) score matrix into one vector.
    kmodel.add(Reshape((len(self.classifier2idx) * len(self.labels2idx),),
                       input_shape=(len(self.classifier2idx), len(self.labels2idx))))
    kmodel.add(Dense(units=len(classdict), activation='sigmoid',
                     kernel_regularizer=l2(l2reg),
                     bias_regularizer=l2(bias_l2reg)))
    kmodel.compile(loss='categorical_crossentropy', optimizer=optimizer)

    Xy = [(xone, yone) for xone, yone in self.convert_traindata_matrix(classdict, tobucket=True)]
    X = np.array([item[0] for item in Xy])
    y = np.array([item[1] for item in Xy])

    kmodel.fit(X, y, epochs=nb_epoch)

    self.model = kmodel
    self.trained = True
Example #30
Source File: captcha_gan.py From Intelligent-Projects-Using-Python with MIT License
def generator(input_dim, alpha=0.2):
    model = Sequential()
    # Keras 2 style: Dense(units, input_dim=...) replaces the legacy
    # output_dim keyword used in the original.
    model.add(Dense(4 * 4 * 512, input_dim=input_dim))
    model.add(Reshape(target_shape=(4, 4, 512)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(256, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(128, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(3, kernel_size=5, strides=2, padding='same'))
    model.add(Activation('tanh'))
    return model

# Define the Discriminator Network