Python tensorflow.ones_like() Examples
The following are 30 code examples of tensorflow.ones_like(). Each example names the original project and source file it was taken from, and you may also want to check out the other available functions and classes of the tensorflow module.
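Before the project-specific examples, here is a minimal standalone sketch of the core behavior: tf.ones_like(x) returns a tensor of ones with the same shape and dtype as x, which makes it convenient for broadcasting scalar parameters and for building masks and label tensors. The variable names below are illustrative only.

import tensorflow as tf

# A 2x3 float tensor; ones_like mirrors its shape and dtype.
x = tf.constant([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])
ones = tf.ones_like(x)             # [[1., 1., 1.], [1., 1., 1.]]

# A pattern that recurs in the examples below: broadcasting a scalar
# parameter to the shape of another tensor.
b = 0.5
b_broadcast = tf.ones_like(x) * b  # every entry equals 0.5, shape (2, 3)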
Example #1
Source File: cenNnFullyElasticNet.py From decompose with MIT License
def cond(self) -> CenNnFullyElasticNetCond:
    b = self.b
    mu = self.mu
    tau = self.tau
    betaExponential = self.betaExponential
    tauLomax = self.tauLomax
    b = tf.ones_like(tauLomax)*b
    mu = tf.ones_like(tauLomax)*mu
    tau = tf.ones_like(tauLomax)*tau
    betaExponential = tf.ones_like(tauLomax)*betaExponential
    name = self.name + "Cond"
    properties = Properties(name=name,
                            drawType=self.drawType,
                            updateType=self.updateType,
                            persistent=False)
    cond = CenNnFullyElasticNetCond(b=b, mu=mu, tau=tau,
                                    betaExponential=betaExponential,
                                    beta=1./tauLomax,
                                    properties=properties)
    return(cond)
Example #2
Source File: skip_thoughts_model_test.py From DOTA_models with Apache License 2.0
def build_inputs(self):
    if self.mode == "encode":
        # Encode mode doesn't read from disk, so defer to parent.
        return super(SkipThoughtsModel, self).build_inputs()
    else:
        # Replace disk I/O with random Tensors.
        self.encode_ids = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.decode_pre_ids = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.decode_post_ids = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.encode_mask = tf.ones_like(self.encode_ids)
        self.decode_pre_mask = tf.ones_like(self.decode_pre_ids)
        self.decode_post_mask = tf.ones_like(self.decode_post_ids)
Example #3
Source File: show_and_tell_model_test.py From DOTA_models with Apache License 2.0
def build_inputs(self):
    if self.mode == "inference":
        # Inference mode doesn't read from disk, so defer to parent.
        return super(ShowAndTellModel, self).build_inputs()
    else:
        # Replace disk I/O with random Tensors.
        self.images = tf.random_uniform(
            shape=[self.config.batch_size, self.config.image_height,
                   self.config.image_width, 3],
            minval=-1,
            maxval=1)
        self.input_seqs = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.target_seqs = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.input_mask = tf.ones_like(self.input_seqs)
Example #4
Source File: normalize.py From soccer-matlab with BSD 2-Clause "Simplified" License
def _std(self):
    """Computes the current estimate of the standard deviation.

    Note that the standard deviation is not defined until at least two
    samples were seen.

    Returns:
      Tensor of current variance.
    """
    variance = tf.cond(
        self._count > 1,
        lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
        lambda: tf.ones_like(self._var_sum) * float('nan'))
    # The epsilon corrects for small negative variance values caused by
    # the algorithm. It was empirically chosen to work with all environments
    # tested.
    return tf.sqrt(variance + 1e-4)
Example #5
Source File: normalize.py From soccer-matlab with BSD 2-Clause "Simplified" License
def _std(self):
    """Computes the current estimate of the standard deviation.

    Note that the standard deviation is not defined until at least two
    samples were seen.

    Returns:
      Tensor of current variance.
    """
    variance = tf.cond(
        self._count > 1,
        lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
        lambda: tf.ones_like(self._var_sum) * float('nan'))
    # The epsilon corrects for small negative variance values caused by
    # the algorithm. It was empirically chosen to work with all environments
    # tested.
    return tf.sqrt(variance + 1e-4)
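Examples #4 and #5 use tf.ones_like to produce a NaN tensor of the right shape when fewer than two samples have been seen. A minimal standalone sketch of that pattern, with illustrative constants not taken from the soccer-matlab code:

import tensorflow as tf

var_sum = tf.constant([0.4, 0.9])
count = tf.constant(1)
variance = tf.cond(count > 1,
                   lambda: var_sum / tf.cast(count - 1, tf.float32),
                   lambda: tf.ones_like(var_sum) * float('nan'))
# With count == 1 the variance is undefined, so variance == [nan, nan].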
Example #6
Source File: bidirectional.py From deep-summarization with MIT License
def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input placeholders,
    Label (Target tip summary) placeholders and the weights of the hidden layer of
    the Seq2Seq model.

    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        # review input - Both original and reversed
        self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # weight of the hidden layer
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend some "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] +
                        self.labels[:-1])
Example #7
Source File: stacked_bidirectional.py From deep-summarization with MIT License
def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input placeholders,
    Label (Target tip summary) placeholders and the weights of the hidden layer of
    the Seq2Seq model.

    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        # review input - Both original and reversed
        self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # weight of the hidden layer
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend some "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] +
                        self.labels[:-1])
Example #8
Source File: simple.py From deep-summarization with MIT License
def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input placeholders,
    Label (Target tip summary) placeholders and the weights of the hidden layer of
    the Seq2Seq model.

    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                        for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # weight of the hidden layer
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend some "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] +
                        self.labels[:-1])
Example #9
Source File: stacked_simple.py From deep-summarization with MIT License
def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input placeholders,
    Label (Target tip summary) placeholders and the weights of the hidden layer of
    the Seq2Seq model.

    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                        for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # weight of the hidden layer
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend some "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] +
                        self.labels[:-1])
Example #10
Source File: slicenet.py From fine-lm with MIT License
def rank_loss(sentence_emb, image_emb, margin=0.2):
    """Experimental rank loss, thanks to kkurach@ for the code."""
    with tf.name_scope("rank_loss"):
        # Normalize first as this is assumed in cosine similarity later.
        sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
        image_emb = tf.nn.l2_normalize(image_emb, 1)
        # Both sentence_emb and image_emb have size [batch, depth].
        scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
        diagonal = tf.diag_part(scores)  # [batch]
        cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
        cost_im = tf.maximum(
            0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
        # Clear diagonals.
        batch_size = tf.shape(sentence_emb)[0]
        empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
        cost_s *= empty_diagonal_mat
        cost_im *= empty_diagonal_mat
        return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im)
Example #11
Source File: tfe_utils.py From tangent with Apache License 2.0
def test_forward_tensor(func, wrt, *args):
    """Test gradients of functions with TFE signatures."""

    def tangent_func():
        df = jvp(func, wrt=wrt, optimized=True, verbose=True)
        args_ = args + tuple(tf.ones_like(args[i]) for i in wrt)  # seed gradient
        return tensors_to_numpy(df(*args_))

    def reference_func():
        return tensors_to_numpy(tfe.gradients_function(func, params=wrt)(*args))

    def backup_reference_func():
        func_ = as_numpy_sig(func)
        args_ = tensors_to_numpy(args)
        return utils.numeric_grad(utils.numeric_grad(func_))(*args_)

    # TODO: Should results really be that far off?
    utils.assert_result_matches_reference(
        tangent_func, reference_func, backup_reference_func, tolerance=1e-4)
Example #12
Source File: sequence.py From icme2019 with MIT License
def call(self, x):
    if (self.size == None) or (self.mode == 'sum'):
        self.size = int(x.shape[-1])

    position_j = 1. / \
        K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
    position_j = K.expand_dims(position_j, 0)

    position_i = tf.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
    position_i = K.expand_dims(position_i, 2)
    position_ij = K.dot(position_i, position_j)
    outputs = K.concatenate(
        [K.cos(position_ij), K.sin(position_ij)], 2)

    if self.mode == 'sum':
        if self.scale:
            outputs = outputs * outputs ** 0.5
        return x + outputs
    elif self.mode == 'concat':
        return K.concatenate([outputs, x], 2)
Example #13
Source File: rnn_decoder_helpers.py From Counterfactual-StoryRW with MIT License
def _top_k_logits(logits, k):
    """Adapted from
    https://github.com/openai/gpt-2/blob/master/src/sample.py#L63-L77
    """
    if k == 0:  # no truncation
        return logits

    def _top_k():
        values, _ = tf.nn.top_k(logits, k=k)
        min_values = values[:, -1, tf.newaxis]
        return tf.where(
            logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,
            logits,
        )

    return tf.cond(
        tf.equal(k, 0),
        lambda: logits,
        lambda: _top_k(),
    )
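The helper above uses tf.ones_like to build a large negative constant with the same shape as the logits, so that everything below the k-th largest logit is effectively removed after the softmax. A minimal usage sketch of that masking idiom, with illustrative numbers:

import tensorflow as tf

logits = tf.constant([[1.0, 5.0, 3.0, 2.0]])
values, _ = tf.nn.top_k(logits, k=2)
min_values = values[:, -1, tf.newaxis]           # 3.0, the 2nd-largest logit
masked = tf.where(logits < min_values,
                  tf.ones_like(logits) * -1e10,  # effectively -inf after softmax
                  logits)                        # [[-1e10, 5.0, 3.0, -1e10]]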
Example #14
Source File: cenNormalRankOneAlgorithms.py From decompose with MIT License
def fitGamma(cls, tau):
    alpha = 0.5/(tf.log(tf.reduce_mean(tau))
                 + 1e-6  # added due to numerical instability
                 - tf.reduce_mean(tf.log(tau)))
    for i in range(20):
        alpha = (1. / (1./alpha
                       + (tf.reduce_mean(tf.log(tau))
                          - tf.log(tf.reduce_mean(tau))
                          + tf.log(alpha)
                          - tf.digamma(alpha))
                       / (alpha**2*(1./alpha
                                    - tf.polygamma(tf.ones_like(alpha), alpha)))))
    beta = alpha/tf.reduce_mean(tau)
    return(alpha, beta)
Example #15
Source File: test_normal2dLikelihood.py From decompose with MIT License
def test_update(device, f, updateType, dtype):
    npdtype = dtype.as_numpy_dtype
    M, K, tau = (20, 30), 3, 0.1
    npU = (np.random.normal(size=(K, M[0])).astype(npdtype),
           np.random.normal(size=(K, M[1])).astype(npdtype))
    U = (tf.constant(npU[0]), tf.constant(npU[1]))
    npnoise = np.random.normal(size=M).astype(npdtype)
    npdata = np.dot(npU[0].T, npU[1]) + npnoise
    data = tf.constant(npdata, dtype=dtype)

    lh = Normal2dLikelihood(M=M, K=K, tau=tau, updateType=updateType)
    lh.init(data=data)
    lh.noiseDistribution.update = MagicMock()
    residuals = tf.ones_like(data)
    lh.residuals = MagicMock(return_value=residuals)
    lh.update(U, data)

    if updateType == UpdateType.ALL:
        lh.residuals.assert_called_once()
        lh.noiseDistribution.update.assert_called_once()
    else:
        lh.residuals.assert_not_called()
        lh.noiseDistribution.update.assert_not_called()

    tf.reset_default_graph()
Example #16
Source File: specificNormal2dLikelihood.py From decompose with MIT License
def prepVars(self, f: int, U: List[Tensor],
             X: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
    if f == 0:
        U1 = U[1]
        alpha1 = self.noiseDistribution.tau
        alpha = tf.ones_like(X[:, 0])
    elif f == 1:
        U1 = U[0]
        alpha1 = tf.ones_like(X[:, 0])
        alpha = self.noiseDistribution.tau
        X = tf.transpose(X)
    U1T = tf.transpose(U1)
    A = tf.matmul(X, U1T*alpha1[..., None])
    B = tf.matmul(U1*alpha1, U1T)
    return(A, B, alpha)
Example #17
Source File: basic_seq2seq.py From Counterfactual-StoryRW with MIT License
def _decode_infer(self, initial_state, encoder_results, features, labels, mode):
    start_token = self._tgt_vocab.bos_token_id
    start_tokens = tf.ones_like(features['source_length']) * start_token

    max_l = self._decoder.hparams.max_decoding_length_infer

    if self._hparams.beam_search_width > 1:
        return beam_search_decode(
            decoder_or_cell=self._decoder,
            embedding=self._tgt_embedder.embedding,
            start_tokens=start_tokens,
            end_token=self._tgt_vocab.eos_token_id,
            beam_width=self._hparams.beam_search_width,
            initial_state=initial_state,
            max_decoding_length=max_l)
    else:
        return self._decoder(
            initial_state=initial_state,
            decoding_strategy=self._hparams.decoding_strategy_infer,
            embedding=self._tgt_embedder.embedding,
            start_tokens=start_tokens,
            end_token=self._tgt_vocab.eos_token_id,
            mode=mode)
Example #18
Source File: metrics.py From Counterfactual-StoryRW with MIT License
def binary_clas_accuracy(pos_preds=None, neg_preds=None):
    """Calculates the accuracy of binary predictions.

    Args:
        pos_preds (optional): A Tensor of any shape containing the
            predicted values on positive data (i.e., ground truth labels are
            `1`).
        neg_preds (optional): A Tensor of any shape containing the
            predicted values on negative data (i.e., ground truth labels are
            `0`).

    Returns:
        A float scalar Tensor containing the accuracy.
    """
    pos_accu = accuracy(tf.ones_like(pos_preds), pos_preds)
    neg_accu = accuracy(tf.zeros_like(neg_preds), neg_preds)
    psize = tf.to_float(tf.size(pos_preds))
    nsize = tf.to_float(tf.size(neg_preds))
    accu = (pos_accu * psize + neg_accu * nsize) / (psize + nsize)
    return accu
Example #19
Source File: pwl_calibration_lib.py From lattice with Apache License 2.0
def compute_interpolation_weights(inputs, keypoints, lengths):
    """Computes weights for PWL calibration.

    Args:
      inputs: Tensor of shape: `(D0, D1, ..., DN, 1)` which represents inputs
        to the pwl function. A typical shape is: `(batch_size, 1)`.
      keypoints: Rank-1 tensor of shape `(num_keypoints - 1)` which represents
        left keypoint of pieces of piecewise linear function along X axis.
      lengths: Rank-1 tensor of shape `(num_keypoints - 1)` which represents
        lengths of pieces of piecewise linear function along X axis.

    Returns:
      Interpolation weights tensor of shape: `(D0, D1, ..., DN, num_keypoints)`.
    """
    weights = (inputs - keypoints) / lengths
    weights = tf.minimum(weights, 1.0)
    weights = tf.maximum(weights, 0.0)
    # Prepend 1.0 at the beginning to add bias unconditionally.
    return tf.concat([tf.ones_like(inputs), weights], axis=-1)
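A hedged usage sketch of the function above, assuming compute_interpolation_weights is in scope; the keypoints and lengths are illustrative and not taken from the lattice library's tests:

import tensorflow as tf

inputs = tf.constant([[0.25], [0.8]])   # shape (batch_size, 1)
keypoints = tf.constant([0.0, 0.5])     # left keypoints of the two pieces
lengths = tf.constant([0.5, 0.5])       # lengths of the two pieces
weights = compute_interpolation_weights(inputs, keypoints, lengths)
# weights[0] == [1.0, 0.5, 0.0] and weights[1] == [1.0, 1.0, 0.6];
# the leading 1.0 is the bias column prepended via tf.ones_like.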
Example #20
Source File: ops.py From CartoonGAN-Tensorflow with MIT License
def generator_loss(loss_func, fake):
    fake_loss = 0

    if loss_func == 'wgan-gp' or loss_func == 'wgan-lp':
        fake_loss = -tf.reduce_mean(fake)

    if loss_func == 'lsgan':
        fake_loss = tf.reduce_mean(tf.square(fake - 1.0))

    if loss_func == 'gan' or loss_func == 'dragan':
        fake_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(fake),
                                                    logits=fake))

    if loss_func == 'hinge':
        fake_loss = -tf.reduce_mean(fake)

    loss = fake_loss

    return loss
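In the 'gan' and 'dragan' branches above, tf.ones_like(fake) builds a tensor of "real" labels matching the shape of the discriminator's logits on fake samples. For context, a minimal sketch of the discriminator-side counterpart, which pairs tf.ones_like with tf.zeros_like; this is an illustrative sketch, not code from the CartoonGAN repository:

import tensorflow as tf

def discriminator_loss_gan(real_logits, fake_logits):
    # Real samples are pushed toward label 1, fakes toward label 0;
    # ones_like / zeros_like build label tensors matching the logits' shapes.
    real_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(real_logits), logits=real_logits))
    fake_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(fake_logits), logits=fake_logits))
    return real_loss + fake_loss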
Example #21
Source File: input_ops.py From DOTA_models with Apache License 2.0
def parse_example_batch(serialized):
    """Parses a batch of tf.Example protos.

    Args:
      serialized: A 1-D string Tensor; a batch of serialized tf.Example protos.

    Returns:
      encode: A SentenceBatch of encode sentences.
      decode_pre: A SentenceBatch of "previous" sentences to decode.
      decode_post: A SentenceBatch of "post" sentences to decode.
    """
    features = tf.parse_example(
        serialized,
        features={
            "encode": tf.VarLenFeature(dtype=tf.int64),
            "decode_pre": tf.VarLenFeature(dtype=tf.int64),
            "decode_post": tf.VarLenFeature(dtype=tf.int64),
        })

    def _sparse_to_batch(sparse):
        ids = tf.sparse_tensor_to_dense(sparse)  # Padding with zeroes.
        mask = tf.sparse_to_dense(sparse.indices, sparse.dense_shape,
                                  tf.ones_like(sparse.values, dtype=tf.int32))
        return SentenceBatch(ids=ids, mask=mask)

    output_names = ("encode", "decode_pre", "decode_post")
    return tuple(_sparse_to_batch(features[x]) for x in output_names)
Example #22
Source File: cenNnFullyElasticNetAlgorithms.py From decompose with MIT License
def fit(cls, parameters: Dict[str, Tensor],
        data: tf.Tensor) -> Dict[str, Tensor]:
    params = cls.getParameters(parameters=parameters)
    b, cenNnElasticnetParams, exponentialParams, lomaxParams = params

    cenNnElasticnetParams = CenNnElasticNetAlgorithms.fit(
        cenNnElasticnetParams, data)
    exponentialParams = ExponentialAlgorithms.fit(exponentialParams, data)
    lomaxParams = LomaxAlgorithms.fit(lomaxParams, data)

    cenNnElasticnetLlh = CenNnElasticNetAlgorithms.llh(
        cenNnElasticnetParams, data)
    cenNnElasticnetLlh = tf.reduce_mean(cenNnElasticnetLlh, axis=0)
    exponentialLlh = ExponentialAlgorithms.llh(exponentialParams, data)
    exponentialLlh = tf.reduce_mean(exponentialLlh, axis=0)
    lomaxLlh = LomaxAlgorithms.llh(lomaxParams, data)
    lomaxLlh = tf.reduce_mean(lomaxLlh, axis=0)

    condElasticNet = tf.logical_and(cenNnElasticnetLlh > lomaxLlh,
                                    cenNnElasticnetLlh > exponentialLlh)
    condExponential = exponentialLlh > lomaxLlh
    b = tf.where(condElasticNet,
                 tf.zeros_like(cenNnElasticnetLlh),
                 tf.where(condExponential,
                          tf.ones_like(exponentialLlh),
                          2.*tf.ones_like(lomaxLlh)))

    updatedParameters = {"b": b,
                         "mu": cenNnElasticnetParams["mu"],
                         "tau": cenNnElasticnetParams["tau"],
                         "betaExponential": exponentialParams["beta"],
                         "alpha": lomaxParams["alpha"],
                         "beta": lomaxParams["beta"],
                         "tauLomax": lomaxParams["tau"]}
    return(updatedParameters)
Example #23
Source File: cenNnFullyElasticNetAlgorithms.py From decompose with MIT License
def pdf(cls, parameters: Dict[str, Tensor], data: Tensor) -> Tensor:
    params = cls.getParameters(parameters=parameters)
    b, cenNnElasticnetParams, exponentialParams, lomaxParams = params

    pdfLomax = LomaxAlgorithms.pdf(lomaxParams, data)
    pdfExponential = ExponentialAlgorithms.pdf(exponentialParams, data)
    pdfElasticNet = CenNnElasticNetAlgorithms.pdf(cenNnElasticnetParams, data)

    b = b[None]*tf.ones_like(pdfElasticNet)
    pdf = tf.where(tf.equal(b, 0.), pdfElasticNet,
                   tf.where(tf.equal(b, 1.), pdfExponential, pdfLomax))
    return(pdf)
Example #24
Source File: lomaxAlgorithms.py From decompose with MIT License
def fitLatents(cls, parameters: Dict[str, Tensor],
               data: Tensor) -> Dict[str, Tensor]:
    alpha, beta = parameters["alpha"], parameters["beta"]
    tau = (alpha + 1)/(beta + data)
    tau = tf.where(tf.less(tau, 1e9), tau, 1e9*tf.ones_like(tau))
    tau = tf.where(tf.greater(tau, 1e-9), tau, 1e-9*tf.ones_like(tau))
    return({"tau": tau})
Example #25
Source File: networks.py From dc_tts with Apache License 2.0
def Attention(Q, K, V, mononotic_attention=False, prev_max_attentions=None):
    '''
    Args:
      Q: Queries. (B, T/r, d)
      K: Keys. (B, N, d)
      V: Values. (B, N, d)
      mononotic_attention: A boolean. At training, it is False.
      prev_max_attentions: (B,). At training, it is set to None.

    Returns:
      R: [Context Vectors; Q]. (B, T/r, 2d)
      alignments: (B, N, T/r)
      max_attentions: (B, T/r)
    '''
    A = tf.matmul(Q, K, transpose_b=True) * tf.rsqrt(tf.to_float(hp.d))
    if mononotic_attention:  # for inference
        key_masks = tf.sequence_mask(prev_max_attentions, hp.max_N)
        reverse_masks = tf.sequence_mask(hp.max_N - hp.attention_win_size - prev_max_attentions,
                                         hp.max_N)[:, ::-1]
        masks = tf.logical_or(key_masks, reverse_masks)
        masks = tf.tile(tf.expand_dims(masks, 1), [1, hp.max_T, 1])
        paddings = tf.ones_like(A) * (-2 ** 32 + 1)  # (B, T/r, N)
        A = tf.where(tf.equal(masks, False), A, paddings)
    A = tf.nn.softmax(A)  # (B, T/r, N)
    max_attentions = tf.argmax(A, -1)  # (B, T/r)
    R = tf.matmul(A, V)
    R = tf.concat((R, Q), -1)

    alignments = tf.transpose(A, [0, 2, 1])  # (B, N, T/r)

    return R, alignments, max_attentions
Example #26
Source File: lomaxAlgorithms.py From decompose with MIT License
def llh(cls, parameters: Dict[str, Tensor], data: tf.Tensor) -> Tensor:
    alpha, beta = parameters["alpha"], parameters["beta"]
    llh = tf.log(alpha) - tf.log(beta) - (alpha+1)*tf.log(1.+data/beta)
    llh = tf.where(tf.less(data, 0.), -np.inf*tf.ones_like(llh), llh)
    return(llh)
Example #27
Source File: cenNormalRankOneAlgorithms.py From decompose with MIT License
def fit(cls, parameters: Dict[str, Tensor],
        data: tf.Tensor) -> Dict[str, Tensor]:
    """Optimal ML update using the EM algorithm."""
    # regularized multiplicative
    M, N = data.get_shape().as_list()
    tau0, tau1 = parameters["tau0"], parameters["tau1"]

    # hyperparameter optimization
    alpha0, beta0 = cls.fitGamma(tau0)
    alpha1, beta1 = cls.fitGamma(tau1)

    # sampling taus
    alphaPost0 = alpha0 + N/2
    betaPost0 = beta0 + tf.matmul(data**2, tau1[..., None])[..., 0]/2
    tau0 = tf.distributions.Gamma(concentration=alphaPost0,
                                  rate=betaPost0).sample(1)[0]
    tau0 = tf.where(tau0 < 1e-6, tf.ones_like(tau0)*1e-6, tau0)
    alphaPost1 = alpha1 + M/2
    betaPost1 = beta1 + tf.matmul(tau0[None, ...], data**2)[0, ...]/2
    tau1 = tf.distributions.Gamma(concentration=alphaPost1,
                                  rate=betaPost1).sample(1)[0]
    tau1 = tf.where(tau1 < 1e-6, tf.ones_like(tau1)*1e-6, tau1)

    # rescaling taus
    normTau0 = tf.norm(tau0)
    normTau1 = tf.norm(tau1)
    normPerFactor = tf.sqrt(normTau0*normTau1)
    tau0 = tau0/normTau0*normPerFactor
    tau1 = tau1/normTau1*normPerFactor

    updatedParameters = {"tau0": tau0, "tau1": tau1}
    return(updatedParameters)
Example #28
Source File: specificNormal2dLikelihood.py From decompose with MIT License
def init(self, data: Tensor) -> None:
    tau = self.__tauInit
    properties = self.__properties
    tau = tf.ones_like(data[0])*tau  # TODO is using ones really useful
    noiseDistribution = CenNormal(tau=tau, properties=properties)
    self.__noiseDistribution = noiseDistribution
Example #29
Source File: losses_test.py From vehicle_counting_tensorflow with MIT License
def testReturnsCorrectAnchorWiseLoss(self):
    prediction_tensor = tf.constant([[[-100, 100, -100],
                                      [100, -100, -100],
                                      [0, 0, -100],
                                      [-100, -100, 100]],
                                     [[-100, 0, 0],
                                      [-100, 100, -100],
                                      [-100, 100, -100],
                                      [100, -100, -100]]], tf.float32)
    target_tensor = tf.constant([[[-100, 100, -100],
                                  [100, -100, -100],
                                  [100, -100, -100],
                                  [-100, -100, 100]],
                                 [[-100, -100, 100],
                                  [-100, 100, -100],
                                  [-100, 100, -100],
                                  [100, -100, -100]]], tf.float32)
    weights = tf.constant([[1, 1, .5, 1],
                           [1, 1, 1, 0]], tf.float32)
    weights_shape = tf.shape(weights)
    weights_multiple = tf.concat(
        [tf.ones_like(weights_shape), tf.constant([3])], axis=0)
    weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple)
    loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss()
    loss = loss_op(prediction_tensor, target_tensor, weights=weights)
    exp_loss = np.matrix([[0, 0, -0.5 * math.log(.5), 0],
                          [-math.log(.5), 0, 0, 0]])
    with self.test_session() as sess:
        loss_output = sess.run(loss)
        self.assertAllClose(loss_output, exp_loss)
Example #30
Source File: tensorFactorisation.py From decompose with MIT License
def rescale(self, U: List[Tensor], fNonUnit: int) -> List[Tensor]:
    """Puts all variance into the `fUpdate`-th factor.

    The method assumes that the norm of all filters is larger than 0."""
    F = len(U)

    # calculate the scale for each source
    scaleOfSources = tf.ones_like(U[0][..., 0])
    for f in range(F):
        scaleOfSources = scaleOfSources*tf.norm(U[f], axis=-1)

    for f in range(F):
        # determine rescaling constant depending on the factor number
        Uf = U[f]
        normUf = tf.norm(Uf, axis=-1)
        if f == fNonUnit:
            # put all variance in the filters of the fUpdate-th factor
            rescaleConstant = scaleOfSources/normUf
        else:
            # normalize the filters of all other factors
            rescaleConstant = 1./normUf

        # rescaled filters
        Uf = Uf*rescaleConstant[..., None]
        U[f] = Uf
    return(U)