Python keras.backend.constant() Examples
The following are 30 code examples of keras.backend.constant().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the
keras.backend module, or try the search function.
Example #1
Source File: utils.py From semantic-embeddings with MIT License | 6 votes |
def devise_ranking_loss(embedding, margin = 0.1):
    """Build the ranking loss used by DeViSE.

    # Arguments:
    - embedding: 2-d numpy array whose rows are class embeddings.
    - margin: margin for the ranking loss.

    # Returns:
        a Keras loss function taking y_true and y_pred as inputs and
        returning a loss tensor.
    """
    def _loss(y_true, y_pred):
        # Similarity of each prediction to every class embedding.
        emb_transposed = K.constant(embedding.T)
        sim_true = K.sum(y_true * y_pred, axis=-1)
        sim_all = K.dot(y_pred, emb_transposed)
        # Hinge over all classes; subtracting margin removes the true
        # class's own contribution (relu(margin - s + s) == margin).
        hinge = K.relu(margin - sim_true[:, None] + sim_all)
        return K.sum(hinge, axis=-1) - margin
    return _loss
Example #2
Source File: attention.py From keras-transformer with MIT License | 6 votes |
def mask_attention_if_needed(self, dot_product):
    """Prevent decoder self-attention from looking at future positions.

    When masking is enabled, every connection to a subsequent position in
    the scaled dot-product receives a large negative value, so the later
    softmax turns it into (near) zero. This guarantees predictions depend
    only on what came before a position. No-op when masking is off.

    :param dot_product: scaled dot-product of Q and K after reshaping
        them to 3D tensors (batch * num_heads, rows, cols)
    """
    if not self.use_masking:
        return dot_product
    rows_cols = K.int_shape(dot_product)[-2:]
    # Lower-triangular ones mark allowed (past/present) positions; the
    # leading 1 in the reshape enables broadcasting over the batch axis.
    allowed = np.tril(np.ones(rows_cols)).reshape((1,) + rows_cols)
    forbidden = 1 - allowed
    big_negative = -1e9
    return (K.constant(allowed, dtype=K.floatx()) * dot_product
            + K.constant(big_negative * forbidden))
Example #3
Source File: position.py From keras-transformer with MIT License | 6 votes |
def positional_signal(hidden_size: int, length: int,
                      min_timescale: float = 1.0, max_timescale: float = 1e4):
    """
    Helper function, constructing basic positional encoding.
    The code is partially based on implementation from Tensor2Tensor library
    https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py
    """
    if hidden_size % 2 != 0:
        # Fix: the original adjacent f-strings were missing a separating
        # space, producing "...divisible by 2.Currently it is N".
        raise ValueError(
            f"The hidden dimension of the model must be divisible by 2. "
            f"Currently it is {hidden_size}")
    position = K.arange(0, length, dtype=K.floatx())
    num_timescales = hidden_size // 2
    # Timescales form a geometric progression from min to max.
    log_timescale_increment = K.constant(
        (np.log(float(max_timescale) / float(min_timescale)) /
         (num_timescales - 1)),
        dtype=K.floatx())
    inv_timescales = (
        min_timescale *
        K.exp(K.arange(num_timescales, dtype=K.floatx()) *
              -log_timescale_increment))
    # Outer product of positions and inverse timescales -> (length, half).
    scaled_time = K.expand_dims(position, 1) * K.expand_dims(inv_timescales, 0)
    signal = K.concatenate([K.sin(scaled_time), K.cos(scaled_time)], axis=1)
    # Leading batch axis for broadcasting over the batch.
    return K.expand_dims(signal, axis=0)
Example #4
Source File: transformer.py From keras-transformer with MIT License | 6 votes |
def __init__(self, halt_epsilon=0.01, time_penalty=0.01, **kwargs):
    """
    :param halt_epsilon: a small constant that allows computation to halt
        after a single update (sigmoid never reaches exactly 1.0)
    :param time_penalty: parameter that weights the relative cost
        of computation versus error. The larger it is, the less
        computational steps the network will try to make and vice versa.
        The default value of 0.01 works well for Transformer.
    :param kwargs: Any standard parameters for a layer in Keras (like name)
    """
    self.halt_epsilon = halt_epsilon
    self.time_penalty = time_penalty
    # Tensors created later (during build/call); declared up-front so the
    # attributes always exist on the instance.
    for attr in ('ponder_cost', 'weighted_output', 'zeros_like_input',
                 'zeros_like_halting', 'ones_like_halting',
                 'halt_budget', 'remainder', 'active_steps'):
        setattr(self, attr, None)
    super().__init__(**kwargs)
Example #5
Source File: keras_yolov3.py From perceptron-benchmark with Apache License 2.0 | 6 votes |
def _target_class_loss(self, target_class, box_scores, box_class_probs_logits):
    """ Evaluate target_class_loss w.r.t. the input. """
    box_scores = K.squeeze(box_scores, axis=0)
    box_class_probs_logits = K.squeeze(box_class_probs_logits, axis=0)
    import tensorflow as tf
    # Boxes whose score for the target class passes the threshold.
    interesting = tf.where(box_scores[:, target_class] > self._score)
    mean_logit = tf.reduce_mean(
        tf.gather(box_class_probs_logits[:, target_class], interesting))
    # reduce_mean over an empty selection yields NaN; fall back to 0 so the
    # NaN does not propagate.
    return tf.cond(
        tf.is_nan(mean_logit),
        lambda: tf.constant(0.),
        lambda: mean_logit)
Example #6
Source File: loss.py From maskrcnn with MIT License | 6 votes |
def offsets_loss(gt_offsets, pred_offsets, dump=False):
    """Smooth-L1 regression loss for box offsets.

    Only positive samples (gt_fg > 0) are expected to be evaluated here.

    Args:
        gt_offsets: ground-truth offsets [R, 4]; the last axis holds the
            proposal/anchor offsets (center, width, height): (tx, ty, th, tw).
        pred_offsets: predicted offsets [R, 4].

    Note:
        Callers are rpn_offsets_loss and head_offsets_loss. When the RPN RoI
        prediction misses, every box handed to head_offsets_loss is negative,
        so its loss becomes 0 while rpn_offsets_loss grows — the combined
        loss (rpn_offsets_loss + head_offsets_loss) therefore still behaves
        sensibly.
    """
    # Guard against an empty ground-truth tensor (would make mean NaN).
    loss = K.switch(tf.size(gt_offsets) > 0,
                    smooth_l1(gt_offsets, pred_offsets),
                    tf.constant(0.0))
    return K.mean(loss)
Example #7
Source File: loss.py From maskrcnn with MIT License | 6 votes |
def labels_loss(gt, pred):
    """Classification loss for labels.

    gt: ground truth [N, R]; the second axis holds the label id.
    pred: predictions (already softmax-ed) [N, R, labels].
    """
    # Plain cross-entropy averaged over everything — no per-batch
    # normalization needed. The paper states:
    # In our current implementation (as in the released code),
    # the cls term in Eqn.(1) is normalized by the mini-batch size
    # (i.e., Ncls = 256) and the reg term is normalized by the number of
    # anchor locations (i.e., Nreg ~ 2,400).
    gt = K.cast(gt, 'int32')
    # Empty ground truth would make the mean NaN; use 0 instead.
    loss = K.switch(tf.size(gt) > 0,
                    sparse_categorical_crossentropy(gt, pred),
                    K.constant(0.0))
    return K.mean(loss)
Example #8
Source File: core.py From text-detection-ocr with Apache License 2.0 | 6 votes |
def _rpn_loss_regr(y_true, y_pred):
    """Smooth L1 loss for RPN box regression.

    y_true [1][HxWx10][3] (class, regr)
    y_pred [1][HxWx10][2] (regr)
    """
    sigma = 9.0
    cls = y_true[0, :, 0]
    regr = y_true[0, :, 1:3]
    # Only anchors labelled positive (class == 1) contribute to the loss.
    positive_idx = tf.where(K.equal(cls, 1))[:, 0]
    regr_true = tf.gather(regr, positive_idx)
    regr_pred = tf.gather(y_pred[0], positive_idx)
    diff = tf.abs(regr_true - regr_pred)
    # Quadratic for small errors (< 1/sigma), linear otherwise.
    less_one = tf.cast(tf.less(diff, 1.0 / sigma), 'float32')
    loss = (less_one * 0.5 * diff ** 2 * sigma
            + tf.abs(1 - less_one) * (diff - 0.5 / sigma))
    loss = K.sum(loss, axis=1)
    # Guard against an empty selection making the mean NaN.
    return K.switch(tf.size(loss) > 0, K.mean(loss), K.constant(0.0))
Example #9
Source File: keras2_emitter.py From MMdnn with MIT License | 6 votes |
def _layer_Shape(self):
    """Emit a helper that wraps tf.shape in a Lambda layer.

    Fix: removed the large commented-out `_layer_Constant` dead code that
    followed this method (kept in version control, not in source).
    """
    self.add_body(0, '''
def __shape(input):
    return Lambda(lambda x: tf.shape(x))(input)
''')
Example #10
Source File: keras2_emitter.py From MMdnn with MIT License | 6 votes |
def emit_Pad(self, IR_node, in_scope=False):
    """Emit Keras code for an IR Pad node.

    Only constant (zero) padding is supported; any other mode raises
    NotImplementedError.
    """
    mode = IR_node.get_attr('mode', 'constant').lower()
    if mode != "constant":
        raise NotImplementedError()
    layer_func = "ZeroPadding"
    # 'pads' holds begin/end pairs for every axis; subtract the batch and
    # channel axes to get the spatial dimensionality.
    spatial_dims = len(IR_node.get_attr('pads')) // 2 - 2
    padding = self._convert_padding(IR_node.get_attr('pads'))
    return "{:<15} = layers.{}{}D(name='{}', padding={})({})".format(
        IR_node.variable_name, layer_func, spatial_dims, IR_node.name,
        padding, self.parent_variable_name(IR_node))
Example #11
Source File: hadamard.py From landmark-recognition-challenge with GNU General Public License v3.0 | 6 votes |
def build(self, input_shape):
    """Create the fixed Hadamard projection plus trainable scale and bias."""
    # Smallest power of two covering both the input and the output dims.
    projection_size = 2 ** int(math.ceil(
        math.log(max(input_shape[1], self.output_dim), 2)))
    # Non-trainable projection: a slice of the Hadamard matrix.
    self.hadamard = K.constant(
        value=hadamard(projection_size,
                       dtype=np.int8)[:input_shape[1], :self.output_dim])
    init_scale = 1. / math.sqrt(self.output_dim)
    self.scale = self.add_weight(name='scale',
                                 shape=(1,),
                                 initializer=Constant(init_scale),
                                 trainable=True)
    if self.use_bias:
        self.bias = self.add_weight(
            name='bias',
            shape=(self.output_dim,),
            initializer=RandomUniform(-init_scale, init_scale),
            trainable=True)
    super(HadamardClassifier, self).build(input_shape)
Example #12
Source File: gen_adversarial2.py From MalConv-keras with MIT License | 6 votes |
def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001, target_class=1):
    """Iterative FGSM restricted to the padded byte range.

    Perturbs the embedding-layer output of `model` towards `target_class`,
    touching only positions [pad_idx, pad_idx + pad_len). Stops early once
    the target-class score reaches 0.9.

    Returns (adversarial embedding, accumulated gradient, last loss value).
    """
    adv = inp.copy()
    loss = K.mean(model.output[:, target_class])
    grads = K.gradients(loss, model.layers[1].output)[0]
    # L2-normalize the gradient for stable step sizes.
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)
    # Zero everywhere except the padding region we are allowed to modify.
    mask = np.zeros(model.layers[1].output.shape[1:])  # embedding layer output shape
    mask[pad_idx:pad_idx + pad_len] = 1
    grads *= K.constant(mask)
    iterate = K.function([model.layers[1].output], [loss, grads])
    g = 0.
    for _ in range(int(1 / step_size) * 10):
        loss_value, grads_value = iterate([adv])
        grads_value *= step_size
        g += grads_value
        adv += grads_value
        if loss_value >= 0.9:
            break
    return adv, g, loss_value
Example #13
Source File: gen_adversarial.py From MalConv-keras with MIT License | 6 votes |
def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001):
    """Iterative FGSM restricted to the padded byte range.

    Perturbs the embedding-layer output of `model` to increase the score of
    class 0, touching only positions [pad_idx, pad_idx + pad_len).
    Stops early once the class-0 score reaches 0.9.

    Returns (adversarial embedding, accumulated gradient, last loss value).
    """
    adv = inp.copy()
    loss = K.mean(model.output[:, 0])
    grads = K.gradients(loss, model.layers[1].output)[0]
    # L2-normalize the gradient for stable step sizes.
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)
    # Zero everywhere except the padding region we are allowed to modify.
    mask = np.zeros(model.layers[1].output.shape[1:])  # embedding layer output shape
    mask[pad_idx:pad_idx + pad_len] = 1
    grads *= K.constant(mask)
    iterate = K.function([model.layers[1].output], [loss, grads])
    g = 0.
    for _ in range(int(1 / step_size) * 10):
        loss_value, grads_value = iterate([adv])
        grads_value *= step_size
        g += grads_value
        adv += grads_value
        if loss_value >= 0.9:
            break
    return adv, g, loss_value
Example #14
Source File: keras2_emitter.py From MMdnn with MIT License | 6 votes |
def _emit_h_zero(self, IR_node):
    # Emit a custom Keras layer that produces a constant filled tensor of
    # shape (1, fill_size) with fill_value; the generated class code is
    # created once per IR pattern and cached in self.layers_codes.
    # NOTE(review): the template's internal indentation was reconstructed
    # from a collapsed source line — verify against the original file.
    if not self.layers_codes.get(IR_node.pattern, None):
        class_code = '''
class my_h_zero(keras.layers.Layer):
    def __init__(self, **kwargs):
        super(my_h_zero, self).__init__(**kwargs)

    def call(self, dummy):
        {:<15} = K.constant(np.full((1, {}), {}))
        return {}
'''.format(IR_node.variable_name,
           IR_node.get_attr('fill_size'),
           IR_node.get_attr('fill_value'),
           IR_node.variable_name)

        self.layers_codes[IR_node.pattern] = class_code

    # The dummy input only exists to hook the layer into the Keras graph.
    code = "{:<15} = my_h_zero()({})".format(IR_node.variable_name,
                                             self.parent_variable_name(IR_node))
    return code
Example #15
Source File: backend.py From bert4keras with Apache License 2.0 | 5 votes |
def piecewise_linear(t, schedule):
    """Piecewise linear function of t.

    `schedule` is a dict such as {1000: 1, 2000: 0.1}, meaning: for
    t in [0, 1000] the output grows uniformly from 0 to 1; for
    t in [1000, 2000] it decays uniformly from 1 to 0.1; for t > 2000
    it stays at 0.1.
    """
    schedule = sorted(schedule.items())
    # Implicitly start from (0, 0.0) unless the schedule says otherwise.
    if schedule[0][0] != 0:
        schedule = [(0, 0.0)] + schedule
    x = K.constant(schedule[0][1], dtype=K.floatx())
    t = K.cast(t, K.floatx())
    for idx, (t_begin, v_begin) in enumerate(schedule):
        previous = x
        if idx == len(schedule) - 1:
            # Past the last breakpoint the value is held constant.
            x = K.constant(v_begin, dtype=K.floatx())
        else:
            t_next, v_next = schedule[idx + 1]
            slope = 1.0 * (v_next - v_begin) / (t_next - t_begin)
            x = v_begin + slope * (t - t_begin)
        # Select this segment's value only once t has reached its start.
        x = K.switch(t >= t_begin, x, previous)
    return x
Example #16
Source File: model.py From keras-YOLOv3-mobilenet with MIT License | 5 votes |
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    # Build a (height, width, 1, 2) grid of per-cell (x, y) offsets.
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy becomes relative to the whole image, wh is scaled by the anchors.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Loss computation needs the raw grid and features as well.
    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
Example #17
Source File: keras_utils.py From mimic3-benchmarks with MIT License | 5 votes |
def softmax(x, axis, mask=None):
    """Masked, numerically stable softmax over `axis`.

    Positions where `mask` is 0 receive zero probability. When `mask` is
    one rank lower than `x`, a trailing axis is added for broadcasting.
    """
    if mask is None:
        mask = K.constant(True)
    mask = K.cast(mask, K.floatx())
    # Fix: the original compared the ndims with `is`, which tests object
    # identity and only works by accident of CPython small-int caching.
    if K.ndim(x) == K.ndim(mask) + 1:
        mask = K.expand_dims(mask)

    # Subtract the max for numerical stability before exponentiating.
    m = K.max(x, axis=axis, keepdims=True)
    e = K.exp(x - m) * mask
    s = K.sum(e, axis=axis, keepdims=True)
    # Avoid division by zero when everything along the axis is masked out.
    s += K.cast(K.cast(s < K.epsilon(), K.floatx()) * K.epsilon(), K.floatx())
    return e / s
Example #18
Source File: cornell_grasp_dataset_reader.py From costar_plan with Apache License 2.0 | 5 votes |
def filter_grasp_success_only(x, verbose=0):
    """ Only traverse data where grasp_success is true
    """
    keep = tf.equal(x['grasp_success'][0], K.constant(1, 'int64'))
    if verbose:
        # tf.Print passes the value through while logging it.
        keep = tf.Print(keep, [keep, x['grasp_success']],
                        'filter_fn x should filter, grasp_success ')
    return keep
Example #19
Source File: metrics.py From keras-metrics with MIT License | 5 votes |
def __init__(self, name=None, label=0, cast_strategy=None, **kwargs):
    """Stateful metric layer tracking statistics for a single label."""
    super(layer, self).__init__(name=name, **kwargs)
    # Stateful: Keras accumulates this metric across batches.
    self.stateful = True
    self.label = label
    self.cast_strategy = cast_strategy
    # float64 epsilon to minimise rounding error in running ratios.
    self.epsilon = K.constant(K.epsilon(), dtype="float64")
Example #20
Source File: model.py From deep_sort_yolov3 with MIT License | 5 votes |
def yolo_head(feats, anchors, num_classes, input_shape):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    # Build a (height, width, 1, 2) grid of per-cell (x, y) offsets.
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy becomes relative to the whole image, wh is scaled by the anchors.
    box_xy = (box_xy + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))

    return box_xy, box_wh, box_confidence, box_class_probs
Example #21
Source File: losses_test.py From faceswap with GNU General Public License v3.0 | 5 votes |
def test_dssim_channels_last(dummy):  # pylint:disable=unused-argument
    """ Basic test for DSSIM Loss """
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_last')
    # Cover both an even and an odd image size with matching kernel sizes.
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [input_dim, input_dim, 3]
        var_x = np.random.random_sample(4 * input_dim * input_dim * 3)
        var_x = var_x.reshape([4] + input_shape)
        var_y = np.random.random_sample(4 * input_dim * input_dim * 3)
        var_y = var_y.reshape([4] + input_shape)

        # Smoke test: the loss must be usable for actually training a model.
        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape,
                         activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', input_shape=input_shape,
                         activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=losses.DSSIMObjective(kernel_size=kernel_size),
                      metrics=['mse'], optimizer=adam)
        model.fit(var_x, var_y, batch_size=2, epochs=1, shuffle='batch')

        # Test same: identical images give (near) zero dissimilarity.
        x_1 = K.constant(var_x, 'float32')
        x_2 = K.constant(var_x, 'float32')
        dssim = losses.DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x_1, x_2)), atol=1e-4)

        # Test opposite: all-zeros vs all-ones yields DSSIM of 0.5.
        x_1 = K.zeros([4] + input_shape)
        x_2 = K.ones([4] + input_shape)
        dssim = losses.DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x_1, x_2)), atol=1e-4)

    # Restore the data format so other tests are unaffected.
    K.set_image_data_format(prev_data)
Example #22
Source File: s3fd.py From faceswap with GNU General Public License v3.0 | 5 votes |
def call(self, *args):  # pylint:disable=arguments-differ
    """Return the stored constant as a tensor; any inputs are ignored."""
    return K.constant(self._constant, dtype=self._dtype)
Example #23
Source File: model.py From keras-yolo3 with MIT License | 5 votes |
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    # Build a (height, width, 1, 2) grid of per-cell (x, y) offsets.
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy becomes relative to the whole image, wh is scaled by the anchors.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Loss computation needs the raw grid and features as well.
    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
Example #24
Source File: yolov3.py From keras-onnx with MIT License | 5 votes |
def call(self, inputs, **kwargs):
    # Per-class non-max suppression.
    # inputs[0]: candidate boxes; inputs[1]: per-class box scores.
    # Returns [boxes, transposed scores, NMS selections], each with an
    # added leading axis; NMS selection rows appear to be
    # (batch_index, class, box_index) — NOTE(review): confirm against the
    # ONNX post-processing consumer.
    boxes = inputs[0]
    box_scores = inputs[1]
    box_scores_transpose = tf.transpose(box_scores, perm=[1, 0])
    boxes_number = tf.shape(boxes)[0]
    box_range = tf.range(boxes_number)
    # Candidate mask per class: score at or above the threshold.
    mask = box_scores >= self.score_threshold
    max_boxes_tensor = K.constant(self.max_boxes, dtype='int32')
    classes_ = []
    batch_indexs_ = []
    nms_indexes_ = []
    class_box_range_ = []
    for c in range(self.num_classes):
        # Select this class's surviving boxes and run NMS over them.
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        class_box_range = tf.boolean_mask(box_range, mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor,
            iou_threshold=self.iou_threshold)
        class_box_scores = K.gather(class_box_scores, nms_index)
        class_box_range = K.gather(class_box_range, nms_index)
        # Constant class id / batch id (single-image batch) per kept box.
        classes = K.ones_like(class_box_scores, 'int32') * c
        batch_index = K.zeros_like(class_box_scores, 'int32')
        batch_indexs_.append(batch_index)
        classes_.append(classes)
        nms_indexes_.append(nms_index)
        class_box_range_.append(class_box_range)
    # Concatenate the per-class results into flat tensors.
    classes_ = K.concatenate(classes_, axis=0)
    batch_indexs_ = K.concatenate(batch_indexs_, axis=0)
    class_box_range_ = K.concatenate(class_box_range_, axis=0)
    # Add leading/inner axes so the three outputs line up as expected.
    boxes_1 = tf.expand_dims(boxes, 0)
    classes_1 = tf.expand_dims(classes_, 1)
    batch_indexs_ = tf.expand_dims(batch_indexs_, 1)
    class_box_range_ = tf.expand_dims(class_box_range_, 1)
    box_scores_transpose_1 = tf.expand_dims(box_scores_transpose, 0)
    nms_final_ = K.concatenate([batch_indexs_, classes_1, class_box_range_], axis=1)
    nms_final_1 = tf.expand_dims(nms_final_, 0)
    return [boxes_1, box_scores_transpose_1, nms_final_1]
Example #25
Source File: dssim_test.py From keras-contrib with MIT License | 5 votes |
def test_DSSIM_channels_last():
    # Basic DSSIM loss test in channels-last data format.
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_last')
    # Cover both an even and an odd image size with matching kernel sizes.
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [input_dim, input_dim, 3]
        X = np.random.random_sample(4 * input_dim * input_dim * 3)
        X = X.reshape([4] + input_shape)
        y = np.random.random_sample(4 * input_dim * input_dim * 3)
        y = y.reshape([4] + input_shape)

        # Smoke test: the loss must be usable for actually training a model.
        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape,
                         activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', input_shape=input_shape,
                         activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=DSSIMObjective(kernel_size=kernel_size),
                      metrics=['mse'], optimizer=adam)
        model.fit(X, y, batch_size=2, epochs=1, shuffle='batch')

        # Test same: identical images give (near) zero dissimilarity.
        x1 = K.constant(X, 'float32')
        x2 = K.constant(X, 'float32')
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x1, x2)), atol=1e-4)

        # Test opposite: all-zeros vs all-ones yields DSSIM of 0.5.
        x1 = K.zeros([4] + input_shape)
        x2 = K.ones([4] + input_shape)
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x1, x2)), atol=1e-4)

    # Restore the data format so other tests are unaffected.
    K.set_image_data_format(prev_data)
Example #26
Source File: transformer.py From keras-transformer with MIT License | 5 votes |
def build(self, input_shape):
    # Create the halting unit's weights; input is (batch, sequence, d_model).
    assert len(input_shape) == 3
    _, sequence_length, d_model = input_shape
    # Linear halting unit: one scalar halting score per position.
    self.halting_kernel = self.add_weight(
        name='halting_kernel',
        shape=(d_model, 1),
        initializer='glorot_uniform',
        trainable=True)
    # Bias starts at 0.1 — presumably following the ACT paper's
    # initialisation; confirm against the original reference.
    self.halting_biases = self.add_weight(
        name='halting_biases',
        shape=(1,),
        initializer=initializers.Constant(0.1),
        trainable=True)
    # Time penalty as a constant tensor, ready for the ponder-cost term.
    self.time_penalty_t = K.constant(self.time_penalty, dtype=K.floatx())
    return super().build(input_shape)
Example #27
Source File: transformer.py From keras-transformer with MIT License | 5 votes |
def call(self, inputs, **kwargs):
    """Layer normalization: normalize over self.axis, then scale and shift."""
    mean = K.mean(inputs, axis=self.axis, keepdims=True)
    variance = K.mean(K.square(inputs - mean), axis=self.axis, keepdims=True)
    # Small constant keeps the denominator away from zero.
    eps = K.constant(1e-5, dtype=K.floatx())
    normalized = (inputs - mean) / K.sqrt(variance + eps)
    return self.gain * normalized + self.bias
Example #28
Source File: utils.py From MMdnn with MIT License | 5 votes |
def yolo_head(feats, anchors, num_classes, input_shape):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    conv_dims = K.shape(feats)[1:3]
    # Build per-cell (y, x) index pairs for every grid location.
    conv_height_index = K.arange(0, stop=conv_dims[1])
    conv_width_index = K.arange(0, stop=conv_dims[0])
    conv_height_index = K.tile(conv_height_index, [conv_dims[0]])
    conv_width_index = K.tile(
        K.expand_dims(conv_width_index, 0), [conv_dims[1], 1])
    conv_width_index = K.flatten(K.transpose(conv_width_index))
    conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
    conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    conv_index = K.cast(conv_index, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
    conv_dims = K.cast(conv_dims[::-1], K.dtype(feats))

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over height index before width index.
    # TODO: It works with +1, don't know why.
    box_xy = (box_xy + conv_index + 1) / conv_dims
    box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(box_wh))

    return box_xy, box_wh, box_confidence, box_class_probs
Example #29
Source File: model.py From keras-yolov3-KF-objectTracking with MIT License | 5 votes |
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    # Build a (height, width, 1, 2) grid of per-cell (x, y) offsets.
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy becomes relative to the whole image, wh is scaled by the anchors.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Loss computation needs the raw grid and features as well.
    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
Example #30
Source File: keras2_emitter.py From MMdnn with MIT License | 5 votes |
def emit_Constant(self, IR_node, in_scope=False):
    """Emit code for an IR Constant node.

    Only emits code when used inside a scope; top-level constants are
    handled elsewhere (returns None in that case, as before).
    """
    if not in_scope:
        return None
    # Fix: compare against None explicitly. The original truthiness check
    # treated legitimate falsy constants (0, 0.0, empty list) as absent
    # and incorrectly fell back to the weights_dict lookup.
    if IR_node.get_attr('value') is not None:
        return "{:<15} = K.constant({})".format(
            IR_node.variable_name, IR_node.get_attr('value'))
    return "{:<15} = K.constant(weights_dict['{}']['value'])".format(
        IR_node.variable_name, IR_node.name)