Python tensorflow.keras.backend.expand_dims() Examples
The following are 30 code examples of tensorflow.keras.backend.expand_dims().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow.keras.backend, or try the search function.
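Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) showing what K.expand_dims does: it inserts a length-1 axis so a tensor can broadcast against another.

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.arange(6).reshape(2, 3))   # shape (2, 3)
col = K.expand_dims(x, axis=-1)              # shape (2, 3, 1)
row = K.expand_dims(x, axis=1)               # shape (2, 1, 3)

# Broadcasting the two expanded views yields all pairwise products, shape (2, 3, 3).
pairwise = col * row
print(K.int_shape(col), K.int_shape(row), K.int_shape(pairwise))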
Example #1
Source File: global_metrics.py From ICCV2019-Horde with MIT License | 6 votes |
def _build_tf_cosine_similarity(max_rank=0, offset=1, eps=1e-12):
    # We build the graph (See utils.generic_utils.tf_recall_at_k for original implementation):
    tf_db = K.placeholder(ndim=2, dtype=K.floatx())  # Where to find
    tf_labels = K.placeholder(ndim=1, dtype=K.floatx())  # and their labels

    tf_batch_query = K.placeholder(ndim=2, dtype=K.floatx())  # Used in case of memory issues
    batch_labels = K.placeholder(ndim=2, dtype=K.floatx())  # and their labels

    all_representations_T = K.expand_dims(tf_db, axis=0)  # 1 x D x N
    batch_representations = K.expand_dims(tf_batch_query, axis=0)  # 1 x n x D
    sim = K.batch_dot(batch_representations, all_representations_T)  # 1 x n x N
    sim = K.squeeze(sim, axis=0)  # n x N
    sim /= tf.linalg.norm(tf_batch_query, axis=1, keepdims=True) + eps
    sim /= tf.linalg.norm(tf_db, axis=0, keepdims=True) + eps

    if max_rank > 0:  # computing r@K or mAP@K
        index_ranking = tf.nn.top_k(sim, k=max_rank + offset).indices
    else:
        index_ranking = tf.contrib.framework.argsort(sim, axis=-1, direction='DESCENDING', stable=True)

    top_k = index_ranking[:, offset:]
    tf_ranking = tf.gather(tf_labels, top_k)

    return tf_db, tf_labels, tf_batch_query, batch_labels, tf_ranking
Example #2
Source File: backend.py From DeepPoseKit with Apache License 2.0 | 6 votes |
def _find_maxima(x, coordinate_scale=1, confidence_scale=255.0):

    x = K.cast(x, K.floatx())

    col_max = K.max(x, axis=1)  # max over rows -> (batch, cols, channels)
    row_max = K.max(x, axis=2)  # max over cols -> (batch, rows, channels)

    maxima = K.max(col_max, 1)  # peak confidence per channel
    maxima = K.expand_dims(maxima, -2) / confidence_scale

    cols = K.cast(K.argmax(col_max, -2), K.floatx())  # column index of the peak
    rows = K.cast(K.argmax(row_max, -2), K.floatx())  # row index of the peak
    cols = K.expand_dims(cols, -2) * coordinate_scale
    rows = K.expand_dims(rows, -2) * coordinate_scale

    maxima = K.concatenate([cols, rows, maxima], -2)  # (batch, 3, channels)

    return maxima
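A quick sanity check of the pattern above (a sketch assuming the _find_maxima from this example is in scope and a TF2 eager runtime; the dummy shapes are an assumption, not part of DeepPoseKit):

import numpy as np
from tensorflow.keras import backend as K

# a batch of 2 confidence maps, 64 x 64, with 3 keypoint channels
maps = K.constant(np.random.rand(2, 64, 64, 3))
peaks = _find_maxima(maps, coordinate_scale=1, confidence_scale=1.0)
print(K.int_shape(peaks))  # (2, 3, 3): axis 1 holds (col, row, peak value), axis 2 is the channel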
Example #3
Source File: keras_layers.py From DeepPavlov with Apache License 2.0 | 6 votes |
def call(self, x, **kwargs):
    assert isinstance(x, list)
    inp_a, inp_b = x
    last_state = K.expand_dims(inp_b[:, -1, :], 1)
    m = []
    for i in range(self.output_dim):
        outp_a = inp_a * self.W[i]
        outp_last = last_state * self.W[i]
        outp_a = K.l2_normalize(outp_a, -1)
        outp_last = K.l2_normalize(outp_last, -1)
        outp = K.batch_dot(outp_a, outp_last, axes=[2, 2])
        m.append(outp)
    if self.output_dim > 1:
        persp = K.concatenate(m, 2)
    else:
        persp = m[0]
    return [persp, persp]
Example #4
Source File: RigidTransformation3DImputation.py From aitom with GNU General Public License v3.0 | 6 votes |
def _batch_mgrid(self, n_batch, *args, **kwargs):
    """
    create batch of orthogonal grids
    similar to np.mgrid

    Parameters
    ----------
    n_batch : int
        number of grids to create
    args : int
        number of points on each axis
    low : float
        minimum coordinate value
    high : float
        maximum coordinate value

    Returns
    -------
    grids : tf.Tensor [n_batch, len(args), args[0], ...]
        batch of orthogonal grids
    """
    grid = self._mgrid(*args, **kwargs)
    grid = tf.expand_dims(grid, 0)
    grids = tf.tile(grid, [n_batch] + [1 for _ in range(len(args) + 1)])
    return grids
Example #5
Source File: RigidTransformation3DImputation.py From aitom with GNU General Public License v3.0 | 6 votes |
def _mask_rotation_matrix_zyz(self, params):
    phi = params[0] * 2 * np.pi - np.pi
    theta = params[1] * 2 * np.pi - np.pi
    psi_t = params[2] * 2 * np.pi - np.pi

    loc_r = params[3:6] * 0  # magnitude of Fourier transformation is translation-invariant

    a1 = self._rotation_matrix_axis(2, psi_t)
    a2 = self._rotation_matrix_axis(1, theta)
    a3 = self._rotation_matrix_axis(2, phi)
    rm = K.dot(K.dot(a3, a2), a1)

    rm = tf.transpose(rm)

    c = K.dot(-rm, K.expand_dims(loc_r))

    rm = K.flatten(rm)

    theta = K.concatenate([rm[:3], c[0], rm[3:6], c[1], rm[6:9], c[2]])

    return theta
Example #6
Source File: RigidTransformation3DImputation.py From aitom with GNU General Public License v3.0 | 6 votes |
def _rotation_matrix_zyz(self, params):
    phi = params[0] * 2 * np.pi - np.pi
    theta = params[1] * 2 * np.pi - np.pi
    psi_t = params[2] * 2 * np.pi - np.pi

    loc_r = params[3:6] * 2 - 1

    a1 = self._rotation_matrix_axis(2, psi_t)  # first rotate about z axis for angle psi_t
    a2 = self._rotation_matrix_axis(1, theta)
    a3 = self._rotation_matrix_axis(2, phi)
    rm = K.dot(K.dot(a3, a2), a1)

    rm = tf.transpose(rm)

    c = K.dot(-rm, K.expand_dims(loc_r))

    rm = K.flatten(rm)

    theta = K.concatenate([rm[:3], c[0], rm[3:6], c[1], rm[6:9], c[2]])

    return theta
Example #7
Source File: backend.py From bert4keras with Apache License 2.0 | 6 votes |
def sequence_masking(x, mask, mode=0, axis=None):
    """Conditionally mask a sequence tensor.
    mask: a 0/1 matrix of shape (batch_size, seq_len);
    mode: if 0, multiply x by the mask directly;
          if 1, subtract a large positive number from the padded positions;
    axis: the axis of the sequence dimension, defaults to 1.
    """
    if mask is None or mode not in [0, 1]:
        return x
    else:
        if axis is None:
            axis = 1
        if axis == -1:
            axis = K.ndim(x) - 1
        assert axis > 0, 'axis must be greater than 0'
        for _ in range(axis - 1):
            mask = K.expand_dims(mask, 1)
        for _ in range(K.ndim(x) - K.ndim(mask) - axis + 1):
            mask = K.expand_dims(mask, K.ndim(mask))
        if mode == 0:
            return x * mask
        else:
            return x - (1 - mask) * 1e12
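To illustrate how the expand_dims loop aligns the mask with x, a small sketch assuming the sequence_masking above is in scope (the shapes are made up for the example):

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.ones((2, 4, 8)))                  # (batch, seq_len, hidden)
mask = K.constant([[1, 1, 1, 0], [1, 1, 0, 0]])     # (batch, seq_len), 1 = keep
masked = sequence_masking(x, mask, mode=0, axis=1)  # mask is expanded to (2, 4, 1), then multiplied
print(K.int_shape(masked))  # (2, 4, 8); padded timesteps are zeroed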
Example #8
Source File: backend.py From bert4keras with Apache License 2.0 | 6 votes |
def pool1d(
    x,
    pool_size,
    strides=1,
    padding='valid',
    data_format=None,
    pool_mode='max'
):
    """Pooling over a sequence of vectors."""
    x = K.expand_dims(x, 1)
    x = K.pool2d(
        x,
        pool_size=(1, pool_size),
        strides=(1, strides),
        padding=padding,
        data_format=data_format,
        pool_mode=pool_mode
    )
    return x[:, 0]
Example #9
Source File: matching.py From fancy-nlp with GNU General Public License v3.0 | 5 votes |
def call(self, inputs, **kwargs):
    sent1 = inputs[0]
    sent2 = inputs[1]
    v1 = K.expand_dims(sent1, -2) * self.kernel
    v2 = K.expand_dims(sent2, -2) * self.kernel
    v1 = K.l2_normalize(v1, axis=-1)
    v2 = K.l2_normalize(v2, axis=-1)
    matching = K.sum(v1 * v2, axis=-1)
    return matching
Example #10
Source File: attn_augconv.py From keras-attention-augmented-convs with MIT License | 5 votes |
def relative_logits_1d(self, q, rel_k, H, W, transpose_mask):
    rel_logits = tf.einsum('bhxyd,md->bhxym', q, rel_k)
    rel_logits = K.reshape(rel_logits, [-1, self.num_heads * H, W, 2 * W - 1])
    rel_logits = self.rel_to_abs(rel_logits)

    rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H, W, W])
    rel_logits = K.expand_dims(rel_logits, axis=3)
    rel_logits = K.tile(rel_logits, [1, 1, 1, H, 1, 1])

    rel_logits = K.permute_dimensions(rel_logits, transpose_mask)
    rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H * W, H * W])
    return rel_logits
Example #11
Source File: attention.py From fancy-nlp with GNU General Public License v3.0 | 5 votes |
def call(self, inputs, mask=None):
    """
    convert to query, key, value vectors, shaped [batch_size*num_head, time_step, embed_dim]
    """
    multihead_query = K.concatenate(tf.split(K.dot(inputs, self.w_q), self.num_heads, axis=2), axis=0)
    multihead_key = K.concatenate(tf.split(K.dot(inputs, self.w_k), self.num_heads, axis=2), axis=0)
    multihead_value = K.concatenate(tf.split(K.dot(inputs, self.w_v), self.num_heads, axis=2), axis=0)

    # scaled dot product
    scaled = K.int_shape(inputs)[-1] ** -0.5
    attend = K.batch_dot(multihead_query, multihead_key, axes=2) * scaled

    # apply mask before normalization (softmax)
    if mask is not None:
        multihead_mask = K.tile(mask, [self.num_heads, 1])
        attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 2)
        attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 1)

    # normalization
    attend = attend / K.cast(K.sum(attend, axis=-1, keepdims=True) + K.epsilon(), K.floatx())

    # apply attention
    attend = K.batch_dot(attend, multihead_value, axes=(2, 1))
    attend = tf.concat(tf.split(attend, self.num_heads, axis=0), axis=2)
    attend = K.dot(attend, self.w_final)

    if self.residual:
        attend = attend + inputs
    if self.normalize:
        # layer normalization over the last axis
        mean = K.mean(attend, axis=-1, keepdims=True)
        std = K.std(attend, axis=-1, keepdims=True)
        attend = self.gamma * (attend - mean) / (std + K.epsilon()) + self.beta

    return attend
Example #12
Source File: global_metrics.py From ICCV2019-Horde with MIT License | 5 votes |
def _build_tf_l2_similarity(max_rank=0, offset=1):
    # We build the graph (See utils.generic_utils.tf_recall_at_k for original implementation):
    tf_db = K.placeholder(ndim=2, dtype=K.floatx())  # Where to find
    tf_labels = K.placeholder(ndim=1, dtype=K.floatx())  # and their labels

    tf_batch_query = K.placeholder(ndim=2, dtype=K.floatx())  # Used in case of memory issues
    batch_labels = K.placeholder(ndim=2, dtype=K.floatx())  # and their labels

    all_representations_T = K.expand_dims(tf_db, axis=0)  # 1 x D x N
    batch_representations = K.expand_dims(tf_batch_query, axis=0)  # 1 x n x D
    dist = -2. * K.batch_dot(batch_representations, all_representations_T)  # 1 x n x N
    dist = K.squeeze(dist, axis=0)  # n x N
    dist += K.sum(tf_batch_query * tf_batch_query, axis=1, keepdims=True)
    dist += K.sum(tf_db * tf_db, axis=0, keepdims=True)

    if max_rank > 0:  # computing r@K or mAP@K
        # top_k finds the k greatest entries and we want the lowest.
        # Note that distance with itself will be last ranked
        dist = -dist
        index_ranking = tf.nn.top_k(dist, k=max_rank + offset).indices
    else:
        index_ranking = tf.contrib.framework.argsort(dist, axis=-1, direction='ASCENDING', stable=True)

    index_ranking = index_ranking[:, offset:]

    tf_ranking = tf.gather(tf_labels, index_ranking)

    return tf_db, tf_labels, tf_batch_query, batch_labels, tf_ranking
Example #13
Source File: bilstm_gru_siamese_network.py From DeepPavlov with Apache License 2.0 | 5 votes |
def create_model(self) -> Model:
    input = []
    if self.use_matrix:
        for i in range(self.num_context_turns + 1):
            input.append(Input(shape=(self.max_sequence_length,)))
        context = input[:self.num_context_turns]
        response = input[-1]
        emb_layer = self.embedding_layer()
        emb_c = [emb_layer(el) for el in context]
        emb_r = emb_layer(response)
    else:
        for i in range(self.num_context_turns + 1):
            input.append(Input(shape=(self.max_sequence_length, self.embedding_dim,)))
        context = input[:self.num_context_turns]
        response = input[-1]
        emb_c = context
        emb_r = response
    lstm_layer = self.lstm_layer()
    lstm_c = [lstm_layer(el) for el in emb_c]
    lstm_r = lstm_layer(emb_r)
    pooling_layer = GlobalMaxPooling1D(name="pooling")
    lstm_c = [pooling_layer(el) for el in lstm_c]
    lstm_r = pooling_layer(lstm_r)
    lstm_c = [Lambda(lambda x: K.expand_dims(x, 1))(el) for el in lstm_c]
    lstm_c = Lambda(lambda x: K.concatenate(x, 1))(lstm_c)
    gru_layer = GRU(2 * self.hidden_dim, name="gru")
    gru_c = gru_layer(lstm_c)

    if self.triplet_mode:
        dist = Lambda(self._pairwise_distances)([gru_c, lstm_r])
    else:
        dist = Lambda(self._diff_mult_dist)([gru_c, lstm_r])
        dist = Dense(1, activation='sigmoid', name="score_model")(dist)
    model = Model(context + [response], dist)
    return model
Example #14
Source File: bilstm_siamese_network.py From DeepPavlov with Apache License 2.0 | 5 votes |
def _batch_all_triplet_loss(self, y_true: Tensor, pairwise_dist: Tensor) -> Tensor:
    anchor_positive_dist = K.expand_dims(pairwise_dist, 2)
    anchor_negative_dist = K.expand_dims(pairwise_dist, 1)
    triplet_loss = anchor_positive_dist - anchor_negative_dist + self.margin
    mask = self._get_triplet_mask(y_true, pairwise_dist)
    triplet_loss = mask * triplet_loss
    triplet_loss = K.clip(triplet_loss, 0.0, None)
    valid_triplets = K.cast(K.greater(triplet_loss, 1e-16), K.dtype(triplet_loss))
    num_positive_triplets = K.sum(valid_triplets)
    triplet_loss = K.sum(triplet_loss) / (num_positive_triplets + 1e-16)
    return triplet_loss
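The two expand_dims calls are what turn a (batch, batch) distance matrix into a (batch, batch, batch) tensor with one entry per anchor/positive/negative combination. A minimal standalone shape check (not DeepPavlov code; the margin value is arbitrary):

import numpy as np
from tensorflow.keras import backend as K

pairwise_dist = K.constant(np.random.rand(5, 5))      # d(i, j) for a batch of 5
anchor_positive = K.expand_dims(pairwise_dist, 2)     # (5, 5, 1): d(a, p)
anchor_negative = K.expand_dims(pairwise_dist, 1)     # (5, 1, 5): d(a, n)
margin = 0.2
triplet = anchor_positive - anchor_negative + margin  # broadcasts to (5, 5, 5)
print(K.int_shape(triplet))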
Example #15
Source File: bilstm_siamese_network.py From DeepPavlov with Apache License 2.0 | 5 votes |
def _get_triplet_mask(self, y_true: Tensor, pairwise_dist: Tensor) -> Tensor:
    # mask label(a) != label(p)
    mask1 = K.expand_dims(K.equal(K.expand_dims(y_true, 0), K.expand_dims(y_true, 1)), 2)
    mask1 = K.cast(mask1, K.dtype(pairwise_dist))
    # mask a == p
    mask2 = K.expand_dims(K.not_equal(pairwise_dist, 0.0), 2)
    mask2 = K.cast(mask2, K.dtype(pairwise_dist))
    # mask label(n) == label(a)
    mask3 = K.expand_dims(K.not_equal(K.expand_dims(y_true, 0), K.expand_dims(y_true, 1)), 1)
    mask3 = K.cast(mask3, K.dtype(pairwise_dist))
    return mask1 * mask2 * mask3
Example #16
Source File: bilstm_siamese_network.py From DeepPavlov with Apache License 2.0 | 5 votes |
def _get_anchor_positive_triplet_mask(self, y_true: Tensor, pairwise_dist: Tensor) -> Tensor:
    # mask label(a) != label(p)
    mask1 = K.equal(K.expand_dims(y_true, 0), K.expand_dims(y_true, 1))
    mask1 = K.cast(mask1, K.dtype(pairwise_dist))
    # mask a == p
    mask2 = K.not_equal(pairwise_dist, 0.0)
    mask2 = K.cast(mask2, K.dtype(pairwise_dist))
    return mask1 * mask2
Example #17
Source File: bilstm_siamese_network.py From DeepPavlov with Apache License 2.0 | 5 votes |
def _get_anchor_negative_triplet_mask(self, y_true: Tensor, pairwise_dist: Tensor) -> Tensor:
    # mask label(n) == label(a)
    mask = K.not_equal(K.expand_dims(y_true, 0), K.expand_dims(y_true, 1))
    mask = K.cast(mask, K.dtype(pairwise_dist))
    return mask
Example #18
Source File: backend.py From DeepPoseKit with Apache License 2.0 | 5 votes |
def _find_subpixel_maxima(
    x, kernel_size, sigma, upsample_factor, coordinate_scale=1.0, confidence_scale=1.0
):
    kernel = gaussian_kernel_2d(kernel_size, sigma)
    kernel = tf.expand_dims(kernel, 0)

    x_shape = tf.shape(x)
    rows = x_shape[1]
    cols = x_shape[2]

    max_vals = tf.reduce_max(tf.reshape(x, [-1, rows * cols]), axis=1)
    max_vals = tf.reshape(max_vals, [-1, 1]) / confidence_scale

    row_pad = rows // 2 - kernel_size // 2
    col_pad = cols // 2 - kernel_size // 2
    padding = [[0, 0], [row_pad, row_pad - 1], [col_pad, col_pad - 1]]
    kernel = tf.pad(kernel, padding)

    row_center = row_pad + (kernel_size // 2)
    col_center = col_pad + (kernel_size // 2)
    center = tf.stack([row_center, col_center])
    center = tf.expand_dims(center, 0)
    center = tf.cast(center, dtype=tf.float32)

    shifts = _upsampled_registration(x, kernel, upsample_factor)
    shifts = center - shifts
    shifts *= coordinate_scale
    maxima = tf.concat([shifts[:, ::-1], max_vals], -1)

    return maxima
Example #19
Source File: loss.py From keras-YOLOv3-model-set with MIT License | 5 votes |
def box_iou(b1, b2):
    """
    Return iou tensor

    Parameters
    ----------
    b1: tensor, shape=(i1,...,iN, 4), xywh
    b2: tensor, shape=(j, 4), xywh

    Returns
    -------
    iou: tensor, shape=(i1,...,iN, j)
    """
    # Expand dim to apply broadcasting.
    b1 = K.expand_dims(b1, -2)
    b1_xy = b1[..., :2]
    b1_wh = b1[..., 2:4]
    b1_wh_half = b1_wh / 2.
    b1_mins = b1_xy - b1_wh_half
    b1_maxes = b1_xy + b1_wh_half

    # Expand dim to apply broadcasting.
    b2 = K.expand_dims(b2, 0)
    b2_xy = b2[..., :2]
    b2_wh = b2[..., 2:4]
    b2_wh_half = b2_wh / 2.
    b2_mins = b2_xy - b2_wh_half
    b2_maxes = b2_xy + b2_wh_half

    intersect_mins = K.maximum(b1_mins, b2_mins)
    intersect_maxes = K.minimum(b1_maxes, b2_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b1_wh[..., 0] * b1_wh[..., 1]
    b2_area = b2_wh[..., 0] * b2_wh[..., 1]
    iou = intersect_area / (b1_area + b2_area - intersect_area)

    return iou
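A small numeric check of the broadcasting above (a sketch assuming the box_iou from this example is in scope; the box values are arbitrary):

import numpy as np
from tensorflow.keras import backend as K

b1 = K.constant(np.array([[[0.5, 0.5, 1.0, 1.0]]]))   # one predicted box, xywh
b2 = K.constant(np.array([[0.5, 0.5, 1.0, 1.0],
                          [2.0, 2.0, 1.0, 1.0]]))     # two ground-truth boxes, xywh
iou = box_iou(b1, b2)
print(K.eval(iou))  # [[[1.0, 0.0]]]: perfect overlap with the first box, none with the second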
Example #20
Source File: loss.py From keras-YOLOv3-model-set with MIT License | 5 votes |
def box_iou(b1, b2):
    """
    Return iou tensor

    Parameters
    ----------
    b1: tensor, shape=(i1,...,iN, 4), xywh
    b2: tensor, shape=(j, 4), xywh

    Returns
    -------
    iou: tensor, shape=(i1,...,iN, j)
    """
    # Expand dim to apply broadcasting.
    #b1 = K.expand_dims(b1, -2)
    b1_xy = b1[..., :2]
    b1_wh = b1[..., 2:4]
    b1_wh_half = b1_wh / 2.
    b1_mins = b1_xy - b1_wh_half
    b1_maxes = b1_xy + b1_wh_half

    # Expand dim to apply broadcasting.
    b2 = K.expand_dims(b2, 0)
    b2_xy = b2[..., :2]
    b2_wh = b2[..., 2:4]
    b2_wh_half = b2_wh / 2.
    b2_mins = b2_xy - b2_wh_half
    b2_maxes = b2_xy + b2_wh_half

    intersect_mins = K.maximum(b1_mins, b2_mins)
    intersect_maxes = K.minimum(b1_maxes, b2_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b1_wh[..., 0] * b1_wh[..., 1]
    b2_area = b2_wh[..., 0] * b2_wh[..., 1]
    iou = intersect_area / (b1_area + b2_area - intersect_area)

    return iou
Example #21
Source File: matching.py From fancy-nlp with GNU General Public License v3.0 | 5 votes |
def call(self, inputs, **kwargs):
    sent1 = inputs[0]
    sent2 = inputs[1]
    v1 = K.expand_dims(sent1, -2) * self.kernel
    v2 = K.expand_dims(sent2, -2) * self.kernel
    v1 = K.l2_normalize(v1, axis=-1)
    v2 = K.l2_normalize(v2, axis=-1)
    matching = K.max(K.sum(K.expand_dims(v1, 2) * K.expand_dims(v2, 1), axis=-1), axis=-2)
    return matching
Example #22
Source File: matching.py From fancy-nlp with GNU General Public License v3.0 | 5 votes |
def call(self, inputs, **kwargs):
    sent1 = inputs[0]
    sent2 = inputs[1]
    v1 = K.expand_dims(sent1, -2) * self.kernel
    v2 = self.kernel * K.expand_dims(sent2, 1)
    v2 = K.expand_dims(v2, 1)
    v1 = K.l2_normalize(v1, axis=-1)
    v2 = K.l2_normalize(v2, axis=-1)
    matching = K.sum(v1 * v2, axis=-1)
    return matching
Example #23
Source File: shapelets.py From tslearn with BSD 2-Clause "Simplified" License | 5 votes |
def call(self, x, **kwargs):
    # (x - y)^2 = x^2 + y^2 - 2 * x * y
    x_sq = K.expand_dims(K.sum(x ** 2, axis=2), axis=-1)
    y_sq = K.reshape(K.sum(self.kernel ** 2, axis=1), (1, 1, self.n_shapelets))
    xy = K.dot(x, K.transpose(self.kernel))
    return (x_sq + y_sq - 2 * xy) / K.int_shape(self.kernel)[1]
Example #24
Source File: iic-13.5.1.py From Advanced-Deep-Learning-with-Keras with MIT License | 5 votes |
def mi_loss(self, y_true, y_pred):
    """Mutual information loss computed from the joint
       distribution matrix and the marginals

    Arguments:
        y_true (tensor): Not used since this is unsupervised learning
        y_pred (tensor): stack of softmax predictions for
            the Siamese latent vectors (Z and Zbar)
    """
    size = self.args.batch_size
    n_labels = y_pred.shape[-1]
    # lower half is Z
    Z = y_pred[0: size, :]
    Z = K.expand_dims(Z, axis=2)
    # upper half is Zbar
    Zbar = y_pred[size: y_pred.shape[0], :]
    Zbar = K.expand_dims(Zbar, axis=1)
    # compute joint distribution (Eq 10.3.2 & .3)
    P = K.batch_dot(Z, Zbar)
    P = K.sum(P, axis=0)
    # enforce symmetric joint distribution (Eq 10.3.4)
    P = (P + K.transpose(P)) / 2.0
    # normalization of total probability to 1.0
    P = P / K.sum(P)
    # marginal distributions (Eq 10.3.5 & .6)
    Pi = K.expand_dims(K.sum(P, axis=1), axis=1)
    Pj = K.expand_dims(K.sum(P, axis=0), axis=0)
    Pi = K.repeat_elements(Pi, rep=n_labels, axis=1)
    Pj = K.repeat_elements(Pj, rep=n_labels, axis=0)
    P = K.clip(P, K.epsilon(), np.finfo(float).max)
    Pi = K.clip(Pi, K.epsilon(), np.finfo(float).max)
    Pj = K.clip(Pj, K.epsilon(), np.finfo(float).max)
    # negative MI loss (Eq 10.3.7)
    neg_mi = K.sum((P * (K.log(Pi) + K.log(Pj) - K.log(P))))
    # each head contributes 1/n_heads to the total loss
    return neg_mi / self.args.heads
Example #25
Source File: FcDEC.py From DEC-DA with MIT License | 5 votes |
def call(self, inputs, **kwargs): """ student t-distribution, as same as used in t-SNE algorithm. q_ij = 1/(1+dist(x_i, u_j)^2), then normalize it. Arguments: inputs: the variable containing data, shape=(n_samples, n_features) Return: q: student's t-distribution, or soft labels for each sample. shape=(n_samples, n_clusters) """ q = 1.0 / (1.0 + (K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2) / self.alpha)) q **= (self.alpha + 1.0) / 2.0 q = K.transpose(K.transpose(q) / K.sum(q, axis=1)) return q
Example #26
Source File: layers.py From neuron with GNU General Public License v3.0 | 5 votes |
def call(self, x):
    x_orig = x

    # x reshape
    this_bs_int = K.shape(x)[0]
    this_bs = tf.cast(this_bs_int, 'float32')  # this batch size
    prev_count = self.count
    x = K.batch_flatten(x)  # B x N

    # update mean
    new_mean, new_count = _mean_update(self.mean, self.count, x, self.cap)

    # new C update. Should be B x N x N
    x = K.expand_dims(x, -1)
    C_delta = K.batch_dot(x, K.permute_dimensions(x, [0, 2, 1]))

    # update cov
    prev_cap = K.minimum(prev_count, self.cap)
    C = self.cov * (prev_cap - 1) + K.sum(C_delta, 0)
    new_cov = C / (prev_cap + this_bs - 1)

    # updates
    updates = [(self.count, new_count), (self.mean, new_mean), (self.cov, new_cov)]
    self.add_update(updates, x_orig)

    # prep for broadcasting :(
    p = tf.concat((K.reshape(this_bs_int, (1,)), K.shape(self.cov)), 0)
    z = tf.ones(p)

    return K.minimum(1., new_count / self.cap) * (z * K.expand_dims(new_cov, 0))
Example #27
Source File: layers.py From neuron with GNU General Public License v3.0 | 5 votes |
def call(self, x):
    # get new mean and count
    this_bs_int = K.shape(x)[0]
    new_mean, new_count = _mean_update(self.mean, self.count, x, self.cap)

    # update op
    updates = [(self.count, new_count), (self.mean, new_mean)]
    self.add_update(updates, x)

    # prep for broadcasting :(
    p = tf.concat((K.reshape(this_bs_int, (1,)), K.shape(self.mean)), 0)
    z = tf.ones(p)

    # the first few 1000 should not matter that much towards this cost
    return K.minimum(1., new_count / self.cap) * (z * K.expand_dims(new_mean, 0))
Example #28
Source File: matching.py From fancy-nlp with GNU General Public License v3.0 | 5 votes |
def call(self, inputs, **kwargs):
    sent1 = inputs[0]
    sent2 = inputs[1]
    v1 = K.expand_dims(sent1, -2) * self.kernel
    v2 = K.expand_dims(sent2, -2) * self.kernel
    v1 = K.l2_normalize(v1, axis=-1)
    v2 = K.l2_normalize(v2, axis=-1)
    matching = K.sum(v1 * v2, axis=-1)
    return matching
Example #29
Source File: layers.py From neuron with GNU General Public License v3.0 | 5 votes |
def _single_matmul(self, x, mult):
    x = K.expand_dims(x, -2)
    y = tf.matmul(x, mult)[..., 0, :]
    return y
Example #30
Source File: time_frequency.py From kapre with MIT License | 5 votes |
def _spectrogram_mono(self, x):
    '''x.shape : (None, 1, len_src),
    returns 2D batch of a mono power-spectrogram'''
    x = K.permute_dimensions(x, [0, 2, 1])
    x = K.expand_dims(x, 3)  # add a dummy dimension (channel axis)
    subsample = (self.n_hop, 1)
    output_real = K.conv2d(
        x,
        self.dft_real_kernels,
        strides=subsample,
        padding=self.padding,
        data_format='channels_last',
    )
    output_imag = K.conv2d(
        x,
        self.dft_imag_kernels,
        strides=subsample,
        padding=self.padding,
        data_format='channels_last',
    )
    output = output_real ** 2 + output_imag ** 2
    # now shape is (batch_sample, n_frame, 1, freq)
    if self.image_data_format == 'channels_last':
        output = K.permute_dimensions(output, [0, 3, 1, 2])
    else:
        output = K.permute_dimensions(output, [0, 2, 3, 1])
    return output