Python keras.backend.concatenate() Examples
The following are 30 code examples of keras.backend.concatenate(), collected from open-source projects; the source file and project are listed above each example. You may also want to check out all available functions/classes of the module keras.backend.
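For orientation, here is a minimal, self-contained sketch of what K.concatenate does, assuming the standalone Keras package with a TensorFlow backend: it joins a list of tensors along a given axis, much like np.concatenate.

import numpy as np
from keras import backend as K

a = K.constant(np.array([[1., 2.], [3., 4.]]))   # shape (2, 2)
b = K.constant(np.array([[5., 6.], [7., 8.]]))   # shape (2, 2)

# Join along the last axis (the default): the result has shape (2, 4).
c = K.concatenate([a, b], axis=-1)
print(K.eval(c))  # [[1. 2. 5. 6.]
                  #  [3. 4. 7. 8.]]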
Example #1
Source File: keras_yolo.py From object-detection with MIT License
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)

    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv21)

    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x)
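For context, a hedged usage sketch: the Input layer is standard Keras, while darknet_body, compose, DarknetConv2D_BN_Leaky, DarknetConv2D and the space_to_depth helpers are assumed to come from the same keras_yolo.py module. The anchor and class counts are hypothetical COCO-style settings.

from keras.layers import Input

image_input = Input(shape=(416, 416, 3))
model = yolo_body(image_input, num_anchors=5, num_classes=80)
# The final conv emits num_anchors * (num_classes + 5) channels per grid cell.
model.summary()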
Example #2
Source File: model.py From YOLO-3D-Box with MIT License
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],   # y_min
        box_mins[..., 1:2],   # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]   # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
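A minimal sketch of calling this helper on a single dummy box, assuming a 416x416 letterboxed network input mapped back to a 480x640 (height, width) image; the values are purely illustrative.

import numpy as np
from keras import backend as K

box_xy = K.constant(np.array([[0.5, 0.5]]))  # normalized box center (x, y)
box_wh = K.constant(np.array([[0.2, 0.4]]))  # normalized box size (w, h)
input_shape = K.constant([416., 416.])       # network input (height, width)
image_shape = K.constant([480., 640.])       # original image (height, width)

boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
print(K.eval(boxes))  # one row: [y_min, x_min, y_max, x_max] in image pixels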
Example #3
Source File: squeezeDet.py From squeezedet-keras with MIT License
def _pad(self, input):
    """Pads the network output so y_pred and y_true have the same dimensions.

    :param input: previous layer
    :return: layer, with the last dimension padded by 4
    """
    # pad = K.placeholder((None, self.config.ANCHORS, 4))
    # pad = np.zeros((self.config.BATCH_SIZE, self.config.ANCHORS, 4))
    # return K.concatenate([input, pad], axis=-1)
    padding = np.zeros((3, 2))
    padding[2, 1] = 4
    return tf.pad(input, padding, "CONSTANT")

# loss function to optimize
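For intuition, the padding spec above touches only the trailing side of the last axis, so a (batch, anchors, n) tensor becomes (batch, anchors, n + 4). A standalone sketch of the same tf.pad call, using integer paddings:

import numpy as np
import tensorflow as tf

x = tf.constant(np.ones((2, 3, 4), dtype=np.float32))
paddings = tf.constant([[0, 0], [0, 0], [0, 4]])  # pad 4 zeros after the last axis
padded = tf.pad(x, paddings, "CONSTANT")
print(padded.shape)  # (2, 3, 8)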
Example #4
Source File: layers.py From DeepLearn with MIT License
def call(self, x, mask=None):
    e1 = x[0].T
    e2 = x[1].T
    batch_size = K.shape(x[0])[0]
    sim = []
    V_out = K.dot(self.V, K.concatenate([e1, e2], axis=0))

    for i in range(self.k):
        temp = K.batch_dot(K.dot(e1.T, self.W[i, :, :]), e2.T, axes=1)
        sim.append(temp)

    sim = K.reshape(sim, (self.k, batch_size))
    tensor_bi_product = self.activation(V_out + sim)
    tensor_bi_product = K.dot(self.U.T, tensor_bi_product).T
    return tensor_bi_product
Example #5
Source File: model.py From multi-object-tracking with GNU General Public License v3.0
This project ships the same yolo_correct_boxes implementation shown in Example #2.
Example #6
Source File: model.py From vision-web-service with MIT License
This project ships the same yolo_correct_boxes implementation shown in Example #2.
Example #7
Source File: model.py From deep_sort_yolov3 with GNU General Public License v3.0
This project ships the same yolo_correct_boxes implementation shown in Example #2.
Example #8
Source File: ChainCRF.py From elmo-bilstm-cnn-crf with Apache License 2.0
def _forward(x, reduce_step, initial_states, U, mask=None):
    '''Forward recurrence of the linear chain crf.'''

    def _forward_step(energy_matrix_t, states):
        alpha_tm1 = states[-1]
        new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
        return new_states[0], new_states

    U_shared = K.expand_dims(K.expand_dims(U, 0), 0)
    if mask is not None:
        mask = K.cast(mask, K.floatx())
        mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
        U_shared = U_shared * mask_U

    inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
    inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)

    last, values, _ = K.rnn(_forward_step, inputs, initial_states)
    return last, values
Example #9
Source File: model.py From keras-yolo3-master with MIT License
This project ships the same yolo_correct_boxes implementation shown in Example #2.
Example #10
Source File: ChainCRF.py From elmo-bilstm-cnn-crf with Apache License 2.0
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    '''Given the observations x, it adds the start boundary energy b_start
    (resp. end boundary energy b_end) on the start (resp. end) elements and
    multiplies the mask.'''
    if mask is None:
        if b_start is not None:
            x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
        if b_end is not None:
            x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
    else:
        mask = K.cast(mask, K.floatx())
        mask = K.expand_dims(mask, 2)
        x *= mask
        if b_start is not None:
            mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
            start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
            x = x + start_mask * b_start
        if b_end is not None:
            mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
            end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
            x = x + end_mask * b_end
    return x
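A small sketch of the unmasked path, assuming add_boundary_energy is importable from the ChainCRF module: with a batch of one sequence of length 3 over 2 tags, b_start is added only to the first timestep.

import numpy as np
from keras import backend as K

x = K.constant(np.zeros((1, 3, 2), dtype=np.float32))
b_start = K.constant(np.array([10., -10.], dtype=np.float32))

out = add_boundary_energy(x, b_start=b_start)
print(K.eval(out)[0])  # first timestep becomes [10., -10.]; the rest stay zero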
Example #11
Source File: model.py From keras-yolo3 with MIT License
This project ships the same yolo_correct_boxes implementation shown in Example #2.
Example #12
Source File: transform_rnn.py From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License
def call(self, x, training=None):
    deta1 = 0.3
    deta2 = 0.3
    deta3 = 0.3
    seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    theta1 = rng.uniform(size=(x.shape[0], 1), low=-deta1, high=deta1, dtype='float32')
    theta2 = rng.uniform(size=(x.shape[0], 1), low=-deta2, high=deta2, dtype='float32')
    theta3 = rng.uniform(size=(x.shape[0], 1), low=-deta3, high=deta3, dtype='float32')
    theta = K.concatenate([theta1, theta2, theta3], axis=-1)
    theta = K.tile(theta, x.shape[1])
    theta = theta.reshape((x.shape[0], x.shape[1], 3))
    theta = theta.reshape((theta.shape[0] * theta.shape[1], theta.shape[2]))
    M = _fusion(theta)
    output = _transform_rot(M, x)
    return K.in_train_phase(output, x, training=training)
Example #13
Source File: keras_yolov3.py From perceptron-benchmark with Apache License 2.0
def _correct_boxes(self, box_xy, box_wh, input_shape, image_shape):
    """Get corrected boxes, which are scaled to original shape."""
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],   # y_min
        box_mins[..., 1:2],   # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]   # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
Example #14
Source File: utils.py From ImageAI with MIT License
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],
        box_mins[..., 1:2],
        box_maxes[..., 0:1],
        box_maxes[..., 1:2]
    ])

    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
Example #15
Source File: lstm2ntm.py From NTM-Keras with MIT License
def preprocess_input(self, x):
    if self.consume_less == 'cpu':
        if 0 < self.dropout_W < 1:
            dropout = self.dropout_W
        else:
            dropout = 0
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[2]
        timesteps = input_shape[1]

        x_i = time_distributed_dense(x, self.W_i, self.b_i, dropout,
                                     input_dim, self.output_dim, timesteps)
        x_f = time_distributed_dense(x, self.W_f, self.b_f, dropout,
                                     input_dim, self.output_dim, timesteps)
        x_c = time_distributed_dense(x, self.W_c, self.b_c, dropout,
                                     input_dim, self.output_dim, timesteps)
        x_o = time_distributed_dense(x, self.W_o, self.b_o, dropout,
                                     input_dim, self.output_dim, timesteps)
        return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
    else:
        return x
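The concatenation at the end simply packs the four per-gate projections along the feature axis so the recurrence can slice them back apart. A toy shape check, with a hypothetical output_dim of 4 per gate:

import numpy as np
from keras import backend as K

x_i = K.constant(np.ones((2, 5, 4)))  # (batch, timesteps, output_dim)
x_f = K.constant(np.ones((2, 5, 4)))
x_c = K.constant(np.ones((2, 5, 4)))
x_o = K.constant(np.ones((2, 5, 4)))
z = K.concatenate([x_i, x_f, x_c, x_o], axis=2)
print(K.int_shape(z))  # (2, 5, 16)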
Example #16
Source File: drmm_utils.py From BERT with Apache License 2.0
def _multi_kmax_context_concat(inputs, top_k, poses):
    x, context_input = inputs
    idxes, topk_vs = list(), list()
    for p in poses:
        val, idx = tf.nn.top_k(tf.slice(x, [0, 0, 0], [-1, -1, p]),
                               k=top_k, sorted=True, name=None)
        topk_vs.append(val)
        idxes.append(idx)
    concat_topk_max = tf.concat(topk_vs, -1, name='concat_val')
    concat_topk_idx = tf.concat(idxes, -1, name='concat_idx')
    # hack that requires the context to have the same shape as similarity matrices
    # https://stackoverflow.com/questions/41897212/how-to-sort-a-multi-dimensional-tensor-using-the-returned-indices-of-tf-nn-top-k
    shape = tf.shape(x)
    mg = tf.meshgrid(*[tf.range(d) for d in
                       (tf.unstack(shape[:(x.get_shape().ndims - 1)]) + [top_k * len(poses)])],
                     indexing='ij')
    val_contexts = tf.gather_nd(context_input, tf.stack(mg[:-1] + [concat_topk_idx], axis=-1))
    return tf.concat([concat_topk_max, val_contexts], axis=-1)
    # return backend.concatenate([concat_topk_max, val_contexts])
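In isolation, the first half of this helper takes the top-k values from successive prefixes of the last axis and concatenates them. A hedged sketch of just that part, with made-up prefix lengths (4, 8) and top_k=2:

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(1, 2, 8).astype(np.float32))
vals = []
for p in (4, 8):  # prefix lengths, as in `poses`
    v, _ = tf.nn.top_k(tf.slice(x, [0, 0, 0], [-1, -1, p]), k=2, sorted=True)
    vals.append(v)
out = tf.concat(vals, -1)  # shape (1, 2, 4): top-2 from each prefix
print(out.shape)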
Example #17
Source File: example.py From learning2run with MIT License
def preprocess(x):
    return K.concatenate([
        x[:, :, 0:1] / 360.0,
        x[:, :, 1:3],
        x[:, :, 3:4] / 360.0,
        x[:, :, 4:6],
        x[:, :, 6:18] / 360.0,
        x[:, :, 18:19] - x[:, :, 1:2],
        x[:, :, 19:22],
        x[:, :, 28:29] - x[:, :, 1:2],
        x[:, :, 29:30],
        x[:, :, 30:31] - x[:, :, 1:2],
        x[:, :, 31:32],
        x[:, :, 32:33] - x[:, :, 1:2],
        x[:, :, 33:34],
        x[:, :, 34:35] - x[:, :, 1:2],
        x[:, :, 35:41],
    ], axis=2)
Example #18
Source File: attention.py From keras-utility-layer-collection with MIT License
def step(self, x, states):
    h = states[0]
    # states[1] necessary?

    # comes from the constants
    X_static = states[-2]
    # equals K.dot(static_x, self._W1) + self._b2 with X.shape=[bs, L, static_input_dim]
    total_x_static_prod = states[-1]

    # expand dims to add the vector which is only valid for this time step
    # to total_x_prod which is valid for all time steps
    hw = K.expand_dims(K.dot(h, self._W2), 1)
    additive_atn = total_x_static_prod + hw
    attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
    static_x_weighted = K.sum(attention * X_static, [1])

    x = K.dot(K.concatenate([x, static_x_weighted], 1), self._W3) + self._b3

    h, new_states = self.layer.cell.call(x, states[:-2])

    # append attention to the states to "smuggle" it out of the RNN wrapper
    attention = K.squeeze(attention, -1)
    h = K.concatenate([h, attention])

    return h, new_states
Example #19
Source File: attention.py From keras-utility-layer-collection with MIT License
def step(self, x, states):
    h = states[0]
    # states[1] necessary?

    # equals K.dot(X, self._W1) + self._b2 with X.shape=[bs, T, input_dim]
    total_x_prod = states[-1]
    # comes from the constants (equals the input sequence)
    X = states[-2]

    # expand dims to add the vector which is only valid for this time step
    # to total_x_prod which is valid for all time steps
    hw = K.expand_dims(K.dot(h, self._W2), 1)
    additive_atn = total_x_prod + hw
    attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
    x_weighted = K.sum(attention * X, [1])

    x = K.dot(K.concatenate([x, x_weighted], 1), self._W3) + self._b3

    h, new_states = self.layer.cell.call(x, states[:-2])

    return h, new_states
Example #20
Source File: model.py From Vehicle-Detection-and-Tracking-Usig-YOLO-and-Deep-Sort-with-Keras-and-Tensorflow with MIT License
This project ships the same yolo_correct_boxes implementation shown in Example #2.
Example #21
Source File: loss.py From yolo3-keras with MIT License
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    num_anchors = len(anchors)
    # [1, 1, 1, num_anchors, 2]
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    # Build the (x, y) grid: (13, 13, 1, 2)
    grid_shape = K.shape(feats)[1:3]  # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
                    [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    # (batch_size, 13, 13, 3, 85)
    feats = K.reshape(feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Decode the raw predictions into real values:
    # box_xy is the box center, box_wh the box width and height.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # When computing the loss, return these parameters instead.
    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs

#---------------------------------------------------#
#   Computes the IoU of each predicted box against the ground-truth box
#---------------------------------------------------#
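A hedged usage sketch with a dummy feature map, assuming 3 anchors and 80 classes so the channel count is 3 * (80 + 5) = 255; the anchor sizes are the usual YOLOv3 large-scale anchors, used here purely as illustration.

import numpy as np
from keras import backend as K

feats = K.constant(np.random.rand(1, 13, 13, 255).astype(np.float32))
anchors = np.array([[116., 90.], [156., 198.], [373., 326.]])
input_shape = K.constant([416., 416.])  # network input (height, width)

box_xy, box_wh, box_conf, box_probs = yolo_head(feats, anchors, 80, input_shape)
print(K.eval(box_xy).shape)  # (1, 13, 13, 3, 2)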
Example #22
Source File: layers.py From FSA-Net with Apache License 2.0
def _make_regular_grids(self, batch_size, height, width):
    # making a single regular grid
    x_linspace = K_linspace(-1., 1., width)
    y_linspace = K_linspace(-1., 1., height)
    x_coordinates, y_coordinates = K_meshgrid(x_linspace, y_linspace)
    x_coordinates = K.flatten(x_coordinates)
    y_coordinates = K.flatten(y_coordinates)
    ones = K.ones_like(x_coordinates)
    grid = K.concatenate([x_coordinates, y_coordinates, ones], 0)

    # repeating grids for each batch
    grid = K.flatten(grid)
    grids = K.tile(grid, K.stack([batch_size]))
    return K.reshape(grids, (batch_size, 3, height * width))
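A standalone equivalent for batch_size=1, height=2, width=2, assuming K_linspace and K_meshgrid are the module's thin wrappers around tf.linspace and tf.meshgrid; each column of the result is an (x, y, 1) homogeneous coordinate.

import tensorflow as tf
from keras import backend as K

xs, ys = tf.meshgrid(tf.linspace(-1., 1., 2), tf.linspace(-1., 1., 2))
grid = K.concatenate([K.flatten(xs), K.flatten(ys), K.ones_like(K.flatten(xs))], 0)
print(K.eval(K.reshape(grid, (1, 3, 4))))
# [[[-1.  1. -1.  1.]
#   [-1. -1.  1.  1.]
#   [ 1.  1.  1.  1.]]]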
Example #23
Source File: utils.py From ImageAI with MIT License
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    num_anchors = len(anchors)
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
                    [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
Example #24
Source File: ntm.py From NTM-Keras with MIT License
def preprocess_input(self, x):
    print("begin preprocess_input(self, x)")
    if self.consume_less == 'cpu':
        if 0 < self.dropout_W < 1:
            dropout = self.dropout_W
        else:
            dropout = 0
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[2]
        timesteps = input_shape[1]
        # The original projections used self.output_dim; they were changed
        # ("add by Robot Steven") to use self.controller_output_dim instead.
        x_i = time_distributed_dense(x, self.W_i, self.b_i, dropout,
                                     input_dim, self.controller_output_dim, timesteps)
        x_f = time_distributed_dense(x, self.W_f, self.b_f, dropout,
                                     input_dim, self.controller_output_dim, timesteps)
        x_c = time_distributed_dense(x, self.W_c, self.b_c, dropout,
                                     input_dim, self.controller_output_dim, timesteps)
        x_o = time_distributed_dense(x, self.W_o, self.b_o, dropout,
                                     input_dim, self.controller_output_dim, timesteps)
        print("end preprocess_input(self, x)")
        return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
    else:
        print("end preprocess_input(self, x)")
        return x
Example #25
Source File: customlayers.py From deep-mil-for-whole-mammogram-classification with MIT License
def call(self, x, mask=None):
    # Sort each row, then keep the `softmink` smallest and
    # `softmaxk` largest responses, concatenated together.
    newx = K.sort(x)
    return K.concatenate([newx[:, :self.softmink],
                          newx[:, newx.shape[1] - self.softmaxk:]], axis=-1)
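The intent, in a standalone sketch with softmink = softmaxk = 2; tf.sort is used for the sort here, since not every keras.backend version exposes one:

import numpy as np
import tensorflow as tf
from keras import backend as K

x = tf.constant(np.array([[5., 1., 4., 2., 3.]], dtype=np.float32))
newx = tf.sort(x, axis=-1)  # row-wise ascending sort
keep = tf.concat([newx[:, :2], newx[:, -2:]], axis=-1)
print(K.eval(keep))  # [[1. 2. 4. 5.]]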
Example #26
Source File: model.py From YOLO-3D-Box with MIT License
def yolo_head(feats, anchors, num_classes, input_shape):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
                    [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (box_xy + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))

    return box_xy, box_wh, box_confidence, box_class_probs
Example #27
Source File: customlayers.py From deep-mil-for-whole-mammogram-classification with MIT License
Identical to the call implementation in Example #25.
Example #28
Source File: customlayers.py From deep-mil-for-whole-mammogram-classification with MIT License
def call(self, x, mask=None):
    import theano.tensor as T
    # Sort each row (Theano backend) and return the fully sorted responses.
    newx = T.sort(x)
    return newx
Example #29
Source File: customlayers.py From deep-mil-for-whole-mammogram-classification with MIT License
def call(self, x, mask=None):
    # Turn the response column at self.axis into a two-class
    # distribution [1 - p, p].
    response = K.reshape(x[:, self.axis], (-1, 1))
    return K.concatenate([1 - response, response], axis=self.axis)
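A standalone sketch of the same trick, assuming the response is the first column and concatenating along the last axis:

import numpy as np
from keras import backend as K

x = K.constant(np.array([[0.2], [0.9]], dtype=np.float32))
response = K.reshape(x[:, 0], (-1, 1))  # pick the response column
out = K.concatenate([1 - response, response], axis=-1)
print(K.eval(out))  # [[0.8 0.2]
                    #  [0.1 0.9]]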
Example #30
Source File: rhn.py From deep-models with Apache License 2.0
def preprocess_input(self, x):
    if self.consume_less == 'cpu':
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[2]
        timesteps = input_shape[1]
        x_t = time_distributed_dense(x, self.W_t, self.b_t, self.dropout_W,
                                     input_dim, self.output_dim, timesteps)
        x_h = time_distributed_dense(x, self.W_h, self.b_h, self.dropout_W,
                                     input_dim, self.output_dim, timesteps)
        return K.concatenate([x_t, x_h], axis=2)
    else:
        return x