Python tensorflow.dynamic_partition() Examples
The following are 28 code examples of tensorflow.dynamic_partition().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow.
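Before diving into the examples, a minimal sketch of the op's semantics may help (assuming TensorFlow 2.x with eager execution; the values are illustrative):

    import tensorflow as tf

    data = tf.constant([10, 20, 30, 40, 50])
    partitions = tf.constant([0, 1, 0, 1, 1])

    # Element i of `data` is routed to output tensor number `partitions[i]`.
    part0, part1 = tf.dynamic_partition(data, partitions, num_partitions=2)
    # part0 -> [10, 30]; part1 -> [20, 40, 50]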
Example #1
Source File: model_func.py From traffic_video_analysis with Apache License 2.0
def huber_loss(infer, label, epsilon, layer_name):
    """
    Args:
        infer
        label
        epsilon
        layer_name
    """
    with tf.variable_scope(layer_name):
        abs_diff = tf.abs(tf.sub(infer, label))
        index = tf.to_int32(abs_diff <= epsilon, name='partition_index')
        l1_part, l2_part = tf.dynamic_partition(abs_diff, index, 2)
        #l1_loss = tf.reduce_mean(l1_part, name = 'l1_loss')
        #l2_loss = tf.reduce_mean(tf.square(l2_part), name = 'l2_loss')
        l1_part_loss = epsilon * (l1_part - 0.5 * epsilon)
        l2_part_loss = 0.5 * tf.square(l2_part)
        hloss = tf.reduce_mean(tf.concat(0, [l1_part_loss, l2_part_loss]),
                               name='huber_loss_sum')
    return hloss
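The TF 0.x calls above (tf.sub, the positional tf.concat(axis, values) signature, tf.to_int32) no longer exist in current releases. A hypothetical TF 2.x port of the same partition-at-a-threshold pattern, for comparison (huber_loss_v2 is not part of the original project):

    import tensorflow as tf

    def huber_loss_v2(infer, label, epsilon):
        abs_diff = tf.abs(infer - label)
        # 1 where the error is small (quadratic branch),
        # 0 where it is large (linear branch).
        index = tf.cast(abs_diff <= epsilon, tf.int32)
        l1_part, l2_part = tf.dynamic_partition(abs_diff, index, 2)
        losses = tf.concat([epsilon * (l1_part - 0.5 * epsilon),
                            0.5 * tf.square(l2_part)], axis=0)
        return tf.reduce_mean(losses)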
Example #2
Source File: expert_utils.py From NMT_GAN with Apache License 2.0
def Dispatch(self, d_tensors):
    """Reshuffles input `Tensor`s to produce output `Tensor`s.

    The dimensions of all input and output `Tensor`s match, except for
    dimension 0.  In dimension 0, the input `Tensor`s match the corresponding
    `gates` `Tensor`s which were passed to the constructor.

    Args:
      d_tensors: a list of `Tensor`s, one per datashard.

    Returns:
      a list of `Tensor`s, one per expert.
    """
    parts = self._data_parallelism(tf.dynamic_partition, d_tensors,
                                   self._gates, self._model_parallelism.n)
    parts_by_expert = TransposeListOfLists(parts)
    x_tensors = self._model_parallelism(tf.concat, parts_by_expert, 0)
    return x_tensors
Example #3
Source File: models.py From DQN-using-PyTorch-and-ML-Agents with GNU General Public License v3.0
def create_inverse_model(self, encoded_state, encoded_next_state):
    """
    Creates inverse model TensorFlow ops for Curiosity module.
    Predicts action taken given current and future encoded states.
    :param encoded_state: Tensor corresponding to encoded current state.
    :param encoded_next_state: Tensor corresponding to encoded next state.
    """
    combined_input = tf.concat([encoded_state, encoded_next_state], axis=1)
    hidden = tf.layers.dense(combined_input, 256, activation=self.swish)
    if self.brain.vector_action_space_type == "continuous":
        pred_action = tf.layers.dense(hidden, self.a_size, activation=None)
        squared_difference = tf.reduce_sum(
            tf.squared_difference(pred_action, self.selected_actions), axis=1)
        self.inverse_loss = tf.reduce_mean(
            tf.dynamic_partition(squared_difference, self.mask, 2)[1])
    else:
        pred_action = tf.layers.dense(hidden, self.a_size, activation=tf.nn.softmax)
        cross_entropy = tf.reduce_sum(
            -tf.log(pred_action + 1e-10) * self.selected_actions, axis=1)
        self.inverse_loss = tf.reduce_mean(
            tf.dynamic_partition(cross_entropy, self.mask, 2)[1])
Example #4
Source File: models.py From DQN-using-PyTorch-and-ML-Agents with GNU General Public License v3.0
def create_forward_model(self, encoded_state, encoded_next_state):
    """
    Creates forward model TensorFlow ops for Curiosity module.
    Predicts encoded future state based on encoded current state and given action.
    :param encoded_state: Tensor corresponding to encoded current state.
    :param encoded_next_state: Tensor corresponding to encoded next state.
    """
    combined_input = tf.concat([encoded_state, self.selected_actions], axis=1)
    hidden = tf.layers.dense(combined_input, 256, activation=self.swish)
    # We compare against the concatenation of all observation streams,
    # hence `self.v_size + int(self.o_size > 0)`.
    pred_next_state = tf.layers.dense(
        hidden, self.curiosity_enc_size * (self.v_size + int(self.o_size > 0)),
        activation=None)
    squared_difference = 0.5 * tf.reduce_sum(
        tf.squared_difference(pred_next_state, encoded_next_state), axis=1)
    self.intrinsic_reward = tf.clip_by_value(
        self.curiosity_strength * squared_difference, 0, 1)
    self.forward_loss = tf.reduce_mean(
        tf.dynamic_partition(squared_difference, self.mask, 2)[1])
Example #5
Source File: expert_utils.py From ASR with Apache License 2.0
def Dispatch(self, d_tensors):
    """Reshuffles input `Tensor`s to produce output `Tensor`s.

    The dimensions of all input and output `Tensor`s match, except for
    dimension 0.  In dimension 0, the input `Tensor`s match the corresponding
    `gates` `Tensor`s which were passed to the constructor.

    Args:
      d_tensors: a list of `Tensor`s, one per datashard.

    Returns:
      a list of `Tensor`s, one per expert.
    """
    parts = self._data_parallelism(tf.dynamic_partition, d_tensors,
                                   self._gates, self._model_parallelism.n)
    parts_by_expert = TransposeListOfLists(parts)
    x_tensors = self._model_parallelism(tf.concat, parts_by_expert, 0)
    return x_tensors
Example #6
Source File: misc_fn.py From unsupervised_captioning with MIT License
def find_obj(sentence, s_mask, classes, scores, num):
    """Computes the object reward for one sentence."""
    shape = tf.shape(sentence)
    sentence = tf.boolean_mask(sentence, s_mask)

    def body(x):
        idx = tf.to_int32(tf.where(tf.equal(sentence, x)))
        idx = tf.cond(tf.shape(idx)[0] > 0,
                      lambda: idx[0, 0],
                      lambda: tf.constant(999, tf.int32))
        return idx

    classes = classes[:num]
    scores = scores[:num]
    ind = tf.map_fn(body, classes, tf.int32)
    mask = tf.not_equal(ind, 999)
    miss, detected = tf.dynamic_partition(scores, tf.to_int32(mask), 2)
    ind = tf.boolean_mask(ind, mask)
    ret = tf.scatter_nd(tf.expand_dims(ind, 1), detected, shape)
    return ret
Example #7
Source File: model_func.py From traffic_video_analysis with Apache License 2.0
def triplet_loss(infer, labels, radius=2.0):
    """
    Args:
        infer: inference concatenate together with 2 * batch_size
        labels: 0 or 1 with batch_size
        radius:
    Return:
        loss: triplet loss
    """
    feature_1, feature_2 = tf.split(0, 2, infer)
    feature_diff = tf.reduce_sum(tf.square(feature_1 - feature_2), 1)
    feature_list = tf.dynamic_partition(feature_diff, labels, 2)
    pos_list = feature_list[1]
    neg_list = tf.maximum(0.0, radius * radius - feature_list[0])
    full_list = tf.concat(0, [pos_list, neg_list])
    loss = tf.reduce_mean(full_list)
    return loss
Example #8
Source File: tf_utils.py From rasa_core with Apache License 2.0
def _arrange_back_fn(list_tensor_1d_mask_1d):
    """Arranges back tensor_1d to restore original order
    modified by `_rearrange_fn` according to mask_1d:
    - number of 0s in mask_1d values on the left are set to
      their corresponding places where mask_1d=0,
    - number of 1s in mask_1d values on the right are set to
      their corresponding places where mask_1d=1"""
    tensor_1d, mask_1d = list_tensor_1d_mask_1d

    mask_indices = tf.dynamic_partition(
        tf.range(tf.shape(tensor_1d)[0]), mask_1d, 2)

    mask_sum = tf.reduce_sum(mask_1d, axis=0)
    partitioned_tensor = [tf.zeros_like(tensor_1d[:-mask_sum]),
                          tensor_1d[-mask_sum:]]

    return tf.dynamic_stitch(mask_indices, partitioned_tensor)
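This helper leans on a useful identity: partitioning a range of row indices with the same mask used on the data, then stitching, inverts the partition. A standalone check of that identity (TF 2.x assumed; values are illustrative):

    import tensorflow as tf

    tensor_1d = tf.constant([10., 20., 30., 40.])
    mask_1d = tf.constant([0, 1, 0, 1])

    indices = tf.dynamic_partition(tf.range(tf.shape(tensor_1d)[0]), mask_1d, 2)
    parts = tf.dynamic_partition(tensor_1d, mask_1d, 2)
    restored = tf.dynamic_stitch(indices, parts)
    # restored == [10., 20., 30., 40.] -- the original order is recovered.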
Example #9
Source File: reinforce_simple_example.py From deep_image_model with Apache License 2.0
def split_apply_merge(inp, partitions, fns):
    """Split input according to partitions.  Pass results through fns and merge.

    Args:
      inp: the input vector
      partitions: tensor of same length as input vector, having values 0, 1
      fns: the two functions.

    Returns:
      the vector routed, where routed[i] = fns[partitions[i]](inp[i])
    """
    new_inputs = tf.dynamic_partition(inp, partitions, len(fns))
    new_outputs = [fns[i](x) for i, x in enumerate(new_inputs)]
    new_indices = tf.dynamic_partition(
        tf.range(0, inp.get_shape()[0]), partitions, len(fns))
    return tf.dynamic_stitch(new_indices, new_outputs)
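A usage sketch (illustrative values; assumes a context where these ops execute, e.g. TF 2.x eager):

    inp = tf.constant([1., 2., 3., 4.])
    partitions = tf.constant([0, 1, 0, 1])
    routed = split_apply_merge(inp, partitions,
                               [lambda x: x * 10., lambda x: x + 0.5])
    # routed == [10., 2.5, 30., 4.5]: element i went through fns[partitions[i]].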
Example #10
Source File: factorization_ops.py From deep_image_model with Apache License 2.0
def scatter_update(cls, factor, indices, values, sharding_func):
    """Helper function for doing sharded scatter update."""
    assert isinstance(factor, list)
    if len(factor) == 1:
        with ops.colocate_with(factor[0]):
            # TODO(agarwal): assign instead of scatter update for full batch update.
            return tf.scatter_update(factor[0], indices, values).op
    else:
        num_shards = len(factor)
        assignments, new_ids = sharding_func(indices)
        assert assignments is not None
        assignments = tf.cast(assignments, tf.int32)
        sharded_ids = tf.dynamic_partition(new_ids, assignments, num_shards)
        sharded_values = tf.dynamic_partition(values, assignments, num_shards)
        updates = []
        for i in xrange(num_shards):
            updates.append(tf.scatter_update(factor[i], sharded_ids[i],
                                             sharded_values[i]))
        return tf.group(*updates)
Example #11
Source File: dynamic_partition_op_test.py From deep_image_model with Apache License 2.0
def testHigherRank(self):
    np.random.seed(7)
    with self.test_session() as sess:
        for n in 2, 3:
            for shape in (4,), (4, 5), (4, 5, 2):
                partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
                for extra_shape in (), (6,), (6, 7):
                    data = np.random.randn(*(shape + extra_shape))
                    partitions_t = tf.constant(partitions, dtype=tf.int32)
                    data_t = tf.constant(data)
                    outputs = tf.dynamic_partition(
                        data_t, partitions_t, num_partitions=n)
                    self.assertEqual(n, len(outputs))
                    outputs_val = sess.run(outputs)
                    for i, output in enumerate(outputs_val):
                        self.assertAllEqual(output, data[partitions == i])

                    # Test gradients
                    outputs_grad = [7 * output for output in outputs_val]
                    grads = tf.gradients(outputs, [data_t, partitions_t],
                                         outputs_grad)
                    self.assertEqual(grads[1], None)  # Partitions has no gradients
                    self.assertAllEqual(7 * data, sess.run(grads[0]))
Example #12
Source File: dynamic_partition_op_test.py From deep_image_model with Apache License 2.0
def testSimpleTwoDimensional(self):
    with self.test_session() as sess:
        data = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
                            [9, 10, 11], [12, 13, 14], [15, 16, 17]])
        indices = tf.constant([0, 0, 2, 3, 2, 1])
        partitions = tf.dynamic_partition(data, indices, num_partitions=4)
        partition_vals = sess.run(partitions)

        self.assertAllEqual([[0, 1, 2], [3, 4, 5]], partition_vals[0])
        self.assertAllEqual([[15, 16, 17]], partition_vals[1])
        self.assertAllEqual([[6, 7, 8], [12, 13, 14]], partition_vals[2])
        self.assertAllEqual([[9, 10, 11]], partition_vals[3])
        # Matrix data input to DynamicPartition results in
        # `num_partitions` matrices with an unknown number of rows, and 3 columns.
        self.assertEqual([None, 3], partitions[0].get_shape().as_list())
        self.assertEqual([None, 3], partitions[1].get_shape().as_list())
        self.assertEqual([None, 3], partitions[2].get_shape().as_list())
        self.assertEqual([None, 3], partitions[3].get_shape().as_list())
Example #13
Source File: dynamic_partition_op_test.py From deep_image_model with Apache License 2.0
def testSimpleOneDimensional(self):
    with self.test_session() as sess:
        data = tf.constant([0, 13, 2, 39, 4, 17])
        indices = tf.constant([0, 0, 2, 3, 2, 1])
        partitions = tf.dynamic_partition(data, indices, num_partitions=4)
        partition_vals = sess.run(partitions)

        self.assertAllEqual([0, 13], partition_vals[0])
        self.assertAllEqual([17], partition_vals[1])
        self.assertAllEqual([2, 4], partition_vals[2])
        self.assertAllEqual([39], partition_vals[3])
        # Vector data input to DynamicPartition results in
        # `num_partitions` vectors of unknown length.
        self.assertEqual([None], partitions[0].get_shape().as_list())
        self.assertEqual([None], partitions[1].get_shape().as_list())
        self.assertEqual([None], partitions[2].get_shape().as_list())
        self.assertEqual([None], partitions[3].get_shape().as_list())
Example #14
Source File: tensor_utils.py From garage with MIT License
def filter_valids(t, valid, name='filter_valids'):
    """Filter out tensor using valid array.

    Args:
        t (tf.Tensor): The tensor to filter.
        valid (list[float]): Array of length of the valid values (either 0 or 1).
        name (string): Name of the operation.

    Returns:
        tf.Tensor: Filtered Tensor.
    """
    # Must round before cast to prevent floating-error
    return tf.dynamic_partition(t, tf.cast(tf.round(valid), tf.int32), 2,
                                name=name)[1]
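A quick illustration of the masking behaviour (values are made up):

    t = tf.constant([1., 2., 3., 4.])
    valid = tf.constant([1., 0., 1., 1.])
    filter_valids(t, valid)  # -> [1., 3., 4.]; partition 1 keeps the valid entries.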
Example #15
Source File: tensor_ops.py From hart with GNU General Public License v3.0
def select_present(x, presence, batch_size=1, name='select_present'):
    with tf.variable_scope(name):
        presence = 1 - tf.to_int32(presence)  # invert mask

        bs = x.get_shape()[0]
        if bs != None:  # here type(bs) is tf.Dimension and == is ok
            batch_size = int(bs)

        num_partitions = 2 * batch_size
        r = tf.range(0, num_partitions, 2)
        r.set_shape(tf.TensorShape(batch_size))
        r = broadcast_against(r, presence)

        presence += r

        selected = tf.dynamic_partition(x, presence, num_partitions)
        selected = tf.concat(axis=0, values=selected)
        selected = tf.reshape(selected, tf.shape(x))

    return selected
Example #16
Source File: layers.py From deepchem with MIT License
def call(self, inputs):
    """Perform M steps of set2set gather,

    Detailed descriptions in: https://arxiv.org/abs/1511.06391
    """
    atom_features, atom_split = inputs
    c = tf.zeros((self.batch_size, self.n_hidden))
    h = tf.zeros((self.batch_size, self.n_hidden))

    for i in range(self.M):
        q_expanded = tf.gather(h, atom_split)
        e = tf.reduce_sum(atom_features * q_expanded, 1)
        e_mols = tf.dynamic_partition(e, atom_split, self.batch_size)
        # Add another value(~-Inf) to prevent error in softmax
        e_mols = [
            tf.concat([e_mol, tf.constant([-1000.])], 0) for e_mol in e_mols
        ]
        a = tf.concat([tf.nn.softmax(e_mol)[:-1] for e_mol in e_mols], 0)
        r = tf.math.segment_sum(
            tf.reshape(a, [-1, 1]) * atom_features, atom_split)
        # Model using this layer must set pad_batches=True
        q_star = tf.concat([h, r], axis=1)
        h, c = self.LSTMStep(q_star, c)
    return q_star
Example #17
Source File: sharded_mutable_dense_hashtable.py From estimator with Apache License 2.0
def lookup(self, keys, name=None):
    """Looks up `keys` in a table, outputs the corresponding values."""
    if keys.dtype.base_dtype != self._key_dtype:
        raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                        (self._key_dtype, keys.dtype))
    self._check_keys(keys)
    num_shards = self._num_shards
    if num_shards == 1:
        return self._table_shards[0].lookup(keys, name=name)

    shard_indices = self._shard_indices(keys)
    key_shards = tf.dynamic_partition(keys, shard_indices, num_shards)
    value_shards = [
        self._table_shards[i].lookup(key_shards[i], name=name)
        for i in range(num_shards)
    ]

    num_keys = tf.compat.v1.shape(keys)[0]
    original_indices = tf.range(num_keys)
    partitioned_indices = tf.dynamic_partition(original_indices, shard_indices,
                                               num_shards)
    return tf.dynamic_stitch(partitioned_indices, value_shards)
Example #18
Source File: encoder.py From atec-nlp with MIT License
def _last_relevant(outputs, sequence_length):
    """Deprecated"""
    batch_size = tf.shape(outputs)[0]
    max_length = outputs.get_shape()[1]
    output_size = outputs.get_shape()[2]
    index = tf.range(0, batch_size) * max_length + (sequence_length - 1)
    flat = tf.reshape(outputs, [-1, output_size])
    last_timesteps = tf.gather(flat, index)  # very slow
    # mask = tf.sign(index)
    # last_timesteps = tf.boolean_mask(flat, mask)

    # Creating a vector of 0s and 1s that will specify what timesteps to choose.
    # partitions = tf.reduce_sum(tf.one_hot(index, tf.shape(flat)[0], dtype='int32'), 0)
    # Selecting the elements we want to choose.
    # _, last_timesteps = tf.dynamic_partition(flat, partitions, 2)  # (batch_size, n_dim)
    # https://stackoverflow.com/questions/35892412/tensorflow-dense-gradient-explanation
    return last_timesteps
Example #19
Source File: sharded_mutable_dense_hashtable.py From estimator with Apache License 2.0
def insert(self, keys, values, name=None):
    """Inserts `keys` in a table."""
    self._check_keys(keys)
    num_shards = self._num_shards
    if num_shards == 1:
        return self._table_shards[0].insert(keys, values, name=name)

    shard_indices = self._shard_indices(keys)
    key_shards = tf.dynamic_partition(keys, shard_indices, num_shards)
    value_shards = tf.dynamic_partition(values, shard_indices, num_shards)
    return_values = [
        self._table_shards[i].insert(key_shards[i], value_shards[i], name=name)
        for i in range(num_shards)
    ]
    return tf.group(*return_values)
Example #20
Source File: layers_copy.py From graph_level_drug_discovery with Apache License 2.0
def graph_gather(atoms, membership_placeholder, batch_size):
    """
    Parameters
    ----------
    atoms: tf.Tensor
        Of shape (n_atoms, n_feat)
    membership_placeholder: tf.Placeholder
        Of shape (n_atoms,). Molecule each atom belongs to.
    batch_size: int
        Batch size for deep model.

    Returns
    -------
    tf.Tensor
        Of shape (batch_size, n_feat)
    """
    # WARNING: Does not work for Batch Size 1! If batch_size = 1, then use reduce_sum!
    assert batch_size > 1, "graph_gather requires batches larger than 1"

    # Obtain the partitions for each of the molecules
    activated_par = tf.dynamic_partition(atoms, membership_placeholder, batch_size)

    # Sum over atoms for each molecule
    sparse_reps = [
        tf.reduce_sum(activated, 0, keep_dims=True) for activated in activated_par
    ]

    # Get the final sparse representations
    sparse_reps = tf.concat(axis=0, values=sparse_reps)
    return sparse_reps
Example #21
Source File: layers.py From graph_level_drug_discovery with Apache License 2.0
def graph_gather(atoms, membership_placeholder, batch_size):
    """
    Parameters
    ----------
    atoms: tf.Tensor
        Of shape (n_atoms, n_feat)
    membership_placeholder: tf.Placeholder
        Of shape (n_atoms,). Molecule each atom belongs to.
    batch_size: int
        Batch size for deep model.

    Returns
    -------
    tf.Tensor
        Of shape (batch_size, n_feat)
    """
    # WARNING: Does not work for Batch Size 1! If batch_size = 1, then use reduce_sum!
    assert batch_size > 1, "graph_gather requires batches larger than 1"

    # Obtain the partitions for each of the molecules
    activated_par = tf.dynamic_partition(atoms, membership_placeholder, batch_size)

    # Sum over atoms for each molecule
    sparse_reps = [
        tf.reduce_sum(activated, 0, keep_dims=True) for activated in activated_par
    ]

    # Get the final sparse representations
    sparse_reps = tf.concat(axis=0, values=sparse_reps)
    return sparse_reps
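For intuition, a tiny worked example (illustrative values; note that keep_dims is the TF 1.x spelling, so this assumes a TF 1.x environment):

    atoms = tf.constant([[1., 0.], [2., 0.], [5., 7.]])  # atoms 0-1 in molecule 0, atom 2 in molecule 1
    membership = tf.constant([0, 0, 1])
    graph_gather(atoms, membership, batch_size=2)
    # -> [[3., 0.], [5., 7.]]: per-molecule sums of the atom features.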
Example #22
Source File: neural_network.py From SIMPLE-NN with GNU General Public License v3.0
def _calc_output(self):
    self.E = self.F = self.S = 0

    for i, item in enumerate(self.parent.inputs['atom_types']):
        zero_cond = tf.equal(tf.reduce_sum(self.next_elem['N_' + item]), 0)

        self.E += tf.cond(
            zero_cond,
            lambda: tf.cast(0., tf.float64),
            lambda: tf.sparse_segment_sum(
                self.ys[item],
                self.next_elem['sparse_indices_' + item],
                self.next_elem['seg_id_' + item],
                num_segments=self.next_elem['num_seg'])[1:])

        if self.inputs['use_force']:
            tmp_force = self.next_elem['dx_' + item] * \
                        tf.expand_dims(
                            tf.expand_dims(self.dys[item], axis=2), axis=3)
            tmp_force = tf.reduce_sum(
                tf.sparse_segment_sum(
                    tmp_force,
                    self.next_elem['sparse_indices_' + item],
                    self.next_elem['seg_id_' + item],
                    num_segments=self.next_elem['num_seg'])[1:],
                axis=1)
            self.F -= tf.cond(
                zero_cond,
                lambda: tf.cast(0., tf.float64),
                lambda: tf.dynamic_partition(tf.reshape(tmp_force, [-1, 3]),
                                             self.next_elem['partition'], 2)[1])

        if self.inputs['use_stress']:
            tmp_stress = self.next_elem['da_' + item] * \
                         tf.expand_dims(
                             tf.expand_dims(self.dys[item], axis=2), axis=3)
            tmp_stress = tf.cond(
                zero_cond,
                lambda: tf.cast(0., tf.float64) * tmp_stress,
                lambda: tf.sparse_segment_sum(
                    tmp_stress,
                    self.next_elem['sparse_indices_' + item],
                    self.next_elem['seg_id_' + item],
                    num_segments=self.next_elem['num_seg'])[1:])

            self.S -= tf.reduce_sum(tmp_stress, axis=[1, 2]) / units.GPa * 10
Example #23
Source File: contrib.py From open-solution-toxic-comments with MIT License
def pair_loss(y_true, y_pred):
    y_true = tf.cast(y_true, tf.int32)
    parts = tf.dynamic_partition(y_pred, y_true, 2)
    y_pos = parts[1]
    y_neg = parts[0]
    y_pos = tf.expand_dims(y_pos, 0)
    y_neg = tf.expand_dims(y_neg, -1)
    out = K.sigmoid(y_neg - y_pos)
    return K.mean(out)
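A quick sanity check of the pairwise-ranking behaviour (illustrative values):

    y_true = tf.constant([1., 0., 1., 0.])
    y_pred = tf.constant([0.9, 0.2, 0.7, 0.4])
    pair_loss(y_true, y_pred)
    # Averages sigmoid(neg_score - pos_score) over all (neg, pos) pairs,
    # so the loss shrinks as positives outscore negatives.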
Example #24
Source File: expert_utils.py From ASR with Apache License 2.0
def __init__(self, data_parallelism, model_parallelism, gates):
    """Constructs a Dispatcher.

    Args:
      data_parallelism: a Parallelism object.
      model_parallelism: a Parallelism object.
      gates: a list of 1d integer `Tensor`s, one per datashard.
        Says which expert to use for each batch element.

    Returns:
      a DistributedSingleDispatcher
    """
    gates = data_parallelism(tf.to_int32, gates)
    self._gates = gates
    self._data_parallelism = data_parallelism
    self._model_parallelism = model_parallelism

    # Compute the number of examples going from each datashard to each expert.
    def _PartSizes(gates):
        return tf.unsorted_segment_sum(
            tf.ones_like(gates), gates, model_parallelism.n)

    part_sizes_by_datashard = data_parallelism(_PartSizes, gates)
    self._part_sizes_by_expert = tf.unstack(
        tf.stack(part_sizes_by_datashard), num=model_parallelism.n, axis=1)

    # These indices will be used to combine the output on the datashards.
    def _StitchIndices(gates):
        return tf.dynamic_partition(
            tf.range(tf.size(gates)), gates, model_parallelism.n)

    self._stitch_indices = data_parallelism(_StitchIndices, gates)
Example #25
Source File: sdca_ops.py From estimator with Apache License 2.0
def _get_partitioned_update_ops(self, v_num, num_partitions_by_var,
                                p_assignments_by_var, gather_ids_by_var,
                                weights, full_update, p_assignments,
                                num_partitions):
    """Get updates for partitioned variables."""
    num_partitions = num_partitions_by_var[v_num]
    p_assignments = p_assignments_by_var[v_num]
    gather_ids = gather_ids_by_var[v_num]
    updates = tf.dynamic_partition(full_update, p_assignments, num_partitions)
    update_ops = []
    for p in range(num_partitions):
        with ops.colocate_with(weights[p]):
            result = tf.compat.v1.scatter_add(weights[p], gather_ids[p],
                                              updates[p])
        update_ops.append(result)
    return update_ops
Example #26
Source File: tensorflow_util.py From MedicalDataAugmentationTool with GNU General Public License v3.0
def reduce_sum_masked(input, mask, axis=None, keepdims=False):
    assert mask.dtype == tf.bool, 'mask must be bool'
    # convert mask to float and use it as weights
    weights = tf.cast(mask, dtype=input.dtype)
    return reduce_sum_weighted(input, weights, axis, keepdims)
    #bad_data, good_data = tf.dynamic_partition(input, tf.cast(mask, tf.int32), 2)
    #return tf.reduce_sum(bad_data, axis=axis, keep_dims=keep_dims)
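The commented-out alternative partitions instead of weighting. In isolation (illustrative values; with this mask encoding, partition 1 holds the masked-in elements):

    x = tf.constant([1., 2., 3., 4.])
    mask = tf.constant([True, False, True, True])
    _, kept = tf.dynamic_partition(x, tf.cast(mask, tf.int32), 2)
    tf.reduce_sum(kept)  # -> 8.0, matching the weighted sum above.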
Example #27
Source File: VAE_functions.py From HI-VAE with MIT License
def theta_estimation_from_ys(samples_y, samples_s, types_list, miss_list,
                             batch_size, reuse):
    theta = []

    # Independent yd -> Compute p(xd|yd)
    for i, d in enumerate(samples_y):

        # Partition the data in missing data (0) and observed data (1)
        missing_y, observed_y = tf.dynamic_partition(d, miss_list[:, i],
                                                     num_partitions=2)
        missing_s, observed_s = tf.dynamic_partition(samples_s, miss_list[:, i],
                                                     num_partitions=2)
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(d)[0]),
                                                 miss_list[:, i],
                                                 num_partitions=2)
        nObs = tf.shape(observed_y)[0]

        # Different layer models for each type of variable
        if types_list[i]['type'] == 'real':
            # params = theta_real(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
            params = theta_real_s(observed_y, missing_y, observed_s, missing_s,
                                  condition_indices, types_list, nObs,
                                  batch_size, i, reuse)

        elif types_list[i]['type'] == 'pos':
            # params = theta_pos(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
            params = theta_pos_s(observed_y, missing_y, observed_s, missing_s,
                                 condition_indices, types_list, nObs,
                                 batch_size, i, reuse)

        elif types_list[i]['type'] == 'count':
            # params = theta_count(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
            params = theta_count_s(observed_y, missing_y, observed_s, missing_s,
                                   condition_indices, types_list, nObs,
                                   batch_size, i, reuse)

        elif types_list[i]['type'] == 'cat':
            # params = theta_cat(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
            params = theta_cat_s(observed_y, missing_y, observed_s, missing_s,
                                 condition_indices, types_list, nObs,
                                 batch_size, i, reuse)

        elif types_list[i]['type'] == 'ordinal':
            # params = theta_ordinal(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
            params = theta_ordinal_s(observed_y, missing_y, observed_s,
                                     missing_s, condition_indices, types_list,
                                     nObs, batch_size, i, reuse)

        theta.append(params)

    return theta
Example #28
Source File: contrib.py From open-solution-mapping-challenge with MIT License
def pair_loss(y_true, y_pred):
    y_true = tf.cast(y_true, tf.int32)
    parts = tf.dynamic_partition(y_pred, y_true, 2)
    y_pos = parts[1]
    y_neg = parts[0]
    y_pos = tf.expand_dims(y_pos, 0)
    y_neg = tf.expand_dims(y_neg, -1)
    out = K.sigmoid(y_neg - y_pos)
    return K.mean(out)