Python theano.tensor.ones_like() Examples
The following are 24 code examples of theano.tensor.ones_like(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module theano.tensor, or try the search function.
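For orientation: T.ones_like(x) returns a symbolic tensor with the same shape (and, by default, the same dtype) as x, filled with ones; an optional dtype argument overrides the element type, as the Keras wrapper in Example #24 shows. Below is a minimal, self-contained usage sketch; the variable names are illustrative and not taken from any example on this page.

import numpy
import theano
import theano.tensor as T

x = T.matrix('x')                         # symbolic 2-D input
f = theano.function([x], T.ones_like(x))  # ones with x's shape and dtype

print(f(numpy.zeros((2, 3), dtype=theano.config.floatX)))
# [[ 1.  1.  1.]
#  [ 1.  1.  1.]]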
Example #1
Source File: test_basic_ops.py From attention-lvcsr with MIT License | 6 votes |
def test_gpualloc_output_to_gpu():
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    a = tcn.shared_constructor(a_val)
    b = T.fscalar()
    f = theano.function([b], T.ones_like(a) + b, mode=mode_without_gpu)
    f_gpu = theano.function([b], B.gpu_from_host(T.ones_like(a)) + b,
                            mode=mode_with_gpu)

    f(2)
    f_gpu(2)

    assert sum([node.op == T.alloc
                for node in f.maker.fgraph.toposort()]) == 1
    assert sum([node.op == B.gpu_alloc
                for node in f_gpu.maker.fgraph.toposort()]) == 1

    assert numpy.allclose(numpy.ones(a.get_value(borrow=True).shape) + 9,
                          f_gpu(9))
    assert numpy.allclose(f(5), f_gpu(5))
Example #2
Source File: basic.py From D-VAE with MIT License | 6 votes |
def sp_ones_like(x):
    """
    Construct a sparse matrix of ones with the same sparsity pattern.

    Parameters
    ----------
    x
        Sparse matrix to take the sparsity pattern.

    Returns
    -------
    A sparse matrix
        The same as `x` with data changed for ones.

    """
    # TODO: don't restrict to CSM formats
    data, indices, indptr, shape = csm_properties(x)
    return CSM(format=x.format)(tensor.ones_like(data), indices, indptr, shape)
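A quick usage sketch for sp_ones_like, assuming it is exposed as theano.sparse.sp_ones_like (as in recent Theano releases): the result keeps the input's sparsity pattern but replaces every stored value with one.

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse

x = theano.sparse.csr_matrix('x')                        # symbolic CSR input
f = theano.function([x], theano.sparse.sp_ones_like(x))

m = sp.csr_matrix(np.array([[0., 2.], [3., 0.]]))
print(f(m).toarray())   # ones exactly where m has stored entries
# [[ 0.  1.]
#  [ 1.  0.]]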
Example #3
Source File: theano_backend.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta,
                                                reduction_axes, epsilon)

    if gamma is None:
        if beta is None:
            gamma = ones_like(x)
        else:
            gamma = ones_like(beta)
    if beta is None:
        if gamma is None:
            beta = zeros_like(x)
        else:
            beta = zeros_like(gamma)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2)
Example #4
Source File: test_basic.py From attention-lvcsr with MIT License | 6 votes |
def test_int32_dtype(self):
    # Reported on the theano-user mailing-list:
    # https://groups.google.com/d/msg/theano-users/MT9ui8LtTsY/rwatwEF9zWAJ
    size = 9
    intX = 'int32'

    C = tensor.matrix('C', dtype=intX)
    I = tensor.matrix('I', dtype=intX)

    fI = I.flatten()
    data = tensor.ones_like(fI)
    indptr = tensor.arange(data.shape[0] + 1, dtype='int32')

    m1 = sparse.CSR(data, fI, indptr, (8, size))
    m2 = sparse.dot(m1, C)
    y = m2.reshape(shape=(2, 4, 9), ndim=3)

    f = theano.function(inputs=[I, C], outputs=y)

    i = numpy.asarray([[4, 3, 7, 7], [2, 8, 4, 5]], dtype=intX)
    a = numpy.asarray(numpy.random.randint(0, 100, (size, size)),
                      dtype=intX)
    f(i, a)
Example #5
Source File: theano_backend.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated
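Note the neutral defaults above: when gamma or beta is omitted, ones_like(var) supplies a scale of ones and zeros_like(mean) a shift of zeros, so the affine step reduces to plain normalization. A numpy sketch of that special case (illustrative, not the backend's code path):

import numpy as np

x = np.random.rand(16, 4).astype('float32')
mean, var = x.mean(axis=0), x.var(axis=0)
gamma = np.ones_like(var)    # neutral scale, as in gamma = ones_like(var)
beta = np.zeros_like(mean)   # neutral shift, as in beta = zeros_like(mean)

normed = gamma * (x - mean) / np.sqrt(var + 1e-3) + beta
assert np.allclose(normed, (x - mean) / np.sqrt(var + 1e-3))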
Example #6
Source File: test_basic.py From D-VAE with MIT License | 6 votes |
def test_structured_add_s_v(self):
    sp_types = {'csc': sp.csc_matrix,
                'csr': sp.csr_matrix}

    for format in ['csr', 'csc']:
        for dtype in ['float32', 'float64']:
            x = theano.sparse.SparseType(format, dtype=dtype)()
            y = tensor.vector(dtype=dtype)
            f = theano.function([x, y], structured_add_s_v(x, y))

            spmat = sp_types[format](random_lil((4, 3), dtype, 3))
            spones = spmat.copy()
            spones.data = numpy.ones_like(spones.data)
            mat = numpy.asarray(numpy.random.rand(3), dtype=dtype)

            out = f(spmat, mat)

            utt.assert_allclose(as_ndarray(spones.multiply(spmat + mat)),
                                out.toarray())
Example #7
Source File: ctc_base.py From theano_ctc with BSD 3-Clause "New" or "Revised" License | 6 votes |
def make_node(self, acts, labels, input_lengths):
    # Unless specified, assume all sequences have full sequence length,
    # i.e. acts.shape[0]
    if input_lengths is None:
        input_lengths = T.cast(acts.shape[0], dtype="int32") * \
            T.ones_like(acts[0, :, 0], dtype=np.int32)

    # acts.shape = [seqLen, batchN, outputUnit]
    if acts.dtype != "float32":
        raise Exception("acts must be float32 instead of %s" % acts.dtype)
    # labels.shape = [batchN, labelLen]
    if labels.dtype != "int32":
        raise Exception("labels must be int32 instead of %s" % labels.dtype)
    # input_lengths.shape = [batchN]
    if input_lengths.dtype != "int32":
        raise Exception("input_lengths must be int32 instead of %s" %
                        input_lengths.dtype)

    applyNode = theano.Apply(self,
                             inputs=[acts, input_lengths, labels],
                             outputs=[self.costs, self.gradients])

    # Return only the cost. Gradient will be returned by grad()
    self.default_output = 0
    return applyNode
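Here T.ones_like(acts[0, :, 0]) builds a vector of ones with one entry per batch element, so multiplying by the scalar acts.shape[0] yields a default input_lengths equal to the full sequence length for every sequence. A numpy analogue with made-up shapes:

import numpy as np

acts = np.zeros((50, 8, 29), dtype='float32')   # [seqLen, batchN, outputUnit]
default_lengths = acts.shape[0] * np.ones_like(acts[0, :, 0], dtype=np.int32)
print(default_lengths)                          # eight 50s, one per batch element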
Example #8
Source File: test_basic.py From attention-lvcsr with MIT License | 6 votes |
def test_structured_add_s_v(self):
    sp_types = {'csc': sp.csc_matrix,
                'csr': sp.csr_matrix}

    for format in ['csr', 'csc']:
        for dtype in ['float32', 'float64']:
            x = theano.sparse.SparseType(format, dtype=dtype)()
            y = tensor.vector(dtype=dtype)
            f = theano.function([x, y], structured_add_s_v(x, y))

            spmat = sp_types[format](random_lil((4, 3), dtype, 3))
            spones = spmat.copy()
            spones.data = numpy.ones_like(spones.data)
            mat = numpy.asarray(numpy.random.rand(3), dtype=dtype)

            out = f(spmat, mat)

            utt.assert_allclose(as_ndarray(spones.multiply(spmat + mat)),
                                out.toarray())
Example #9
Source File: basic.py From attention-lvcsr with MIT License | 6 votes |
def sp_ones_like(x):
    """
    Construct a sparse matrix of ones with the same sparsity pattern.

    Parameters
    ----------
    x
        Sparse matrix to take the sparsity pattern.

    Returns
    -------
    A sparse matrix
        The same as `x` with data changed for ones.

    """
    # TODO: don't restrict to CSM formats
    data, indices, indptr, shape = csm_properties(x)
    return CSM(format=x.format)(tensor.ones_like(data), indices, indptr, shape)
Example #10
Source File: test_basic.py From D-VAE with MIT License | 6 votes |
def test_int32_dtype(self):
    # Reported on the theano-user mailing-list:
    # https://groups.google.com/d/msg/theano-users/MT9ui8LtTsY/rwatwEF9zWAJ
    size = 9
    intX = 'int32'

    C = tensor.matrix('C', dtype=intX)
    I = tensor.matrix('I', dtype=intX)

    fI = I.flatten()
    data = tensor.ones_like(fI)
    indptr = tensor.arange(data.shape[0] + 1, dtype='int32')

    m1 = sparse.CSR(data, fI, indptr, (8, size))
    m2 = sparse.dot(m1, C)
    y = m2.reshape(shape=(2, 4, 9), ndim=3)

    f = theano.function(inputs=[I, C], outputs=y)

    i = numpy.asarray([[4, 3, 7, 7], [2, 8, 4, 5]], dtype=intX)
    a = numpy.asarray(numpy.random.randint(0, 100, (size, size)),
                      dtype=intX)
    f(i, a)
Example #11
Source File: test_basic_ops.py From D-VAE with MIT License | 6 votes |
def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b],
                        T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b],
                            T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b],
                             T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                             mode=mode_with_gpu)

    assert sum([node.op == T.alloc
                for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join
                for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, B.GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, B.GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1

    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
Example #12
Source File: gauss.py From D-VAE with MIT License | 6 votes |
def compute_kernel(lls, lsf, x, z):

    ls = T.exp(lls)
    sf = T.exp(lsf)

    if x.ndim == 1:
        x = x[None, :]

    if z.ndim == 1:
        z = z[None, :]

    lsre = T.outer(T.ones_like(x[:, 0]), ls)

    r2 = T.outer(T.sum(x * x / lsre, 1), T.ones_like(z[:, 0:1])) - \
        np.float32(2) * T.dot(x / lsre, T.transpose(z)) + \
        T.dot(np.float32(1.0) / lsre, T.transpose(z)**2)

    k = sf * T.exp(-np.float32(0.5) * r2)

    return k
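In compute_kernel, ones_like serves purely as a broadcasting device: T.outer(v, T.ones_like(z[:, 0:1])) tiles the column vector v across as many columns as z has rows, so the three terms of r2 line up as (n_x, n_z) matrices of lengthscale-weighted squared distances. A numpy check of that identity, with illustrative shapes:

import numpy as np

x = np.random.rand(4, 3)
z = np.random.rand(5, 3)
ls = np.random.rand(3) + 0.5   # lengthscales, kept away from zero

r2 = (np.outer((x * x / ls).sum(axis=1), np.ones(len(z)))
      - 2.0 * (x / ls) @ z.T
      + (1.0 / ls) @ (z.T ** 2))

# r2[i, j] is the squared distance between x[i] and z[j], weighted by 1/ls
expected = (((x[:, None, :] - z[None, :, :]) ** 2) / ls).sum(axis=-1)
assert np.allclose(r2, expected)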
Example #13
Source File: gauss.py From D-VAE with MIT License | 6 votes |
def compute_psi1(lls, lsf, xmean, xvar, z):

    if xmean.ndim == 1:
        xmean = xmean[None, :]

    ls = T.exp(lls)
    sf = T.exp(lsf)
    lspxvar = ls + xvar
    constterm1 = ls / lspxvar
    constterm2 = T.prod(T.sqrt(constterm1), 1)

    r2_psi1 = T.outer(T.sum(xmean * xmean / lspxvar, 1),
                      T.ones_like(z[:, 0:1])) - \
        np.float32(2) * T.dot(xmean / lspxvar, T.transpose(z)) + \
        T.dot(np.float32(1.0) / lspxvar, T.transpose(z)**2)

    psi1 = sf * T.outer(constterm2, T.ones_like(z[:, 0:1])) * \
        T.exp(-np.float32(0.5) * r2_psi1)

    return psi1
Example #14
Source File: sparse_gp_theano_internal.py From D-VAE with MIT License | 6 votes |
def compute_log_ei(self, x, incumbent):

    Kzz = compute_kernel(self.lls, self.lsf, self.z, self.z) + \
        T.eye(self.z.shape[0]) * self.jitter * T.exp(self.lsf)
    KzzInv = T.nlinalg.MatrixInversePSD()(Kzz)
    LLt = T.dot(self.LParamPost, T.transpose(self.LParamPost))
    covCavityInv = KzzInv + LLt * \
        casting(self.n_points - self.set_for_training) / \
        casting(self.n_points)
    covCavity = T.nlinalg.MatrixInversePSD()(covCavityInv)
    meanCavity = T.dot(covCavity,
                       casting(self.n_points - self.set_for_training) /
                       casting(self.n_points) * self.mParamPost)
    KzzInvcovCavity = T.dot(KzzInv, covCavity)
    KzzInvmeanCavity = T.dot(KzzInv, meanCavity)

    Kxz = compute_kernel(self.lls, self.lsf, x, self.z)
    B = T.dot(KzzInvcovCavity, KzzInv) - KzzInv
    v_out = T.exp(self.lsf) + \
        T.dot(Kxz * T.dot(Kxz, B),
              T.ones_like(self.z[:, 0:1]))  # + T.exp(self.lvar_noise)
    m_out = T.dot(Kxz, KzzInvmeanCavity)

    s = (incumbent - m_out) / T.sqrt(v_out)
    log_ei = T.log((incumbent - m_out) * ratio(s) + T.sqrt(v_out)) + \
        log_n_pdf(s)

    return log_ei
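In v_out above, dotting a matrix with T.ones_like(self.z[:, 0:1]), a column of ones, is a compact row-sum: each predictive variance sums the elementwise product Kxz * T.dot(Kxz, B) over the inducing points. The numpy identity being exploited:

import numpy as np

M = np.random.rand(4, 6)
ones_col = np.ones((6, 1))   # plays the role of T.ones_like(z[:, 0:1])
assert np.allclose(M @ ones_col, M.sum(axis=1, keepdims=True))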
Example #15
Source File: gauss.py From icml18-jtnn with MIT License | 6 votes |
def compute_kernel(lls, lsf, x, z):

    ls = T.exp(lls)
    sf = T.exp(lsf)

    if x.ndim == 1:
        x = x[None, :]

    if z.ndim == 1:
        z = z[None, :]

    lsre = T.outer(T.ones_like(x[:, 0]), ls)

    r2 = T.outer(T.sum(x * x / lsre, 1), T.ones_like(z[:, 0:1])) - \
        np.float32(2) * T.dot(x / lsre, T.transpose(z)) + \
        T.dot(np.float32(1.0) / lsre, T.transpose(z)**2)

    k = sf * T.exp(-np.float32(0.5) * r2)

    return k
Example #16
Source File: gauss.py From icml18-jtnn with MIT License | 6 votes |
def compute_psi1(lls, lsf, xmean, xvar, z):

    if xmean.ndim == 1:
        xmean = xmean[None, :]

    ls = T.exp(lls)
    sf = T.exp(lsf)
    lspxvar = ls + xvar
    constterm1 = ls / lspxvar
    constterm2 = T.prod(T.sqrt(constterm1), 1)

    r2_psi1 = T.outer(T.sum(xmean * xmean / lspxvar, 1),
                      T.ones_like(z[:, 0:1])) - \
        np.float32(2) * T.dot(xmean / lspxvar, T.transpose(z)) + \
        T.dot(np.float32(1.0) / lspxvar, T.transpose(z)**2)

    psi1 = sf * T.outer(constterm2, T.ones_like(z[:, 0:1])) * \
        T.exp(-np.float32(0.5) * r2_psi1)

    return psi1
Example #17
Source File: sparse_gp_theano_internal.py From icml18-jtnn with MIT License | 6 votes |
def compute_log_ei(self, x, incumbent):

    Kzz = compute_kernel(self.lls, self.lsf, self.z, self.z) + \
        T.eye(self.z.shape[0]) * self.jitter * T.exp(self.lsf)
    KzzInv = T.nlinalg.MatrixInversePSD()(Kzz)
    LLt = T.dot(self.LParamPost, T.transpose(self.LParamPost))
    covCavityInv = KzzInv + LLt * \
        casting(self.n_points - self.set_for_training) / \
        casting(self.n_points)
    covCavity = T.nlinalg.MatrixInversePSD()(covCavityInv)
    meanCavity = T.dot(covCavity,
                       casting(self.n_points - self.set_for_training) /
                       casting(self.n_points) * self.mParamPost)
    KzzInvcovCavity = T.dot(KzzInv, covCavity)
    KzzInvmeanCavity = T.dot(KzzInv, meanCavity)

    Kxz = compute_kernel(self.lls, self.lsf, x, self.z)
    B = T.dot(KzzInvcovCavity, KzzInv) - KzzInv
    v_out = T.exp(self.lsf) + \
        T.dot(Kxz * T.dot(Kxz, B),
              T.ones_like(self.z[:, 0:1]))  # + T.exp(self.lvar_noise)
    m_out = T.dot(Kxz, KzzInvmeanCavity)

    s = (incumbent - m_out) / T.sqrt(v_out)
    log_ei = T.log((incumbent - m_out) * ratio(s) + T.sqrt(v_out)) + \
        log_n_pdf(s)

    return log_ei
Example #18
Source File: bricks.py From Associative_LSTM with MIT License | 6 votes |
def apply(self, inputs, states, cells, mask=None):
    def slice_last(x, no):
        return x[:, no * self.dim: (no + 1) * self.dim]

    activation = tensor.dot(states, self.W_state) + inputs
    in_gate = self.gate_activation.apply(
        slice_last(activation, 0))
    pre = slice_last(activation, 1)
    forget_gate = self.gate_activation.apply(
        pre + self.bias * tensor.ones_like(pre))
    next_cells = (
        forget_gate * cells +
        in_gate * self.activation.apply(slice_last(activation, 2)))
    out_gate = self.gate_activation.apply(
        slice_last(activation, 3))
    next_states = out_gate * self.activation.apply(next_cells)
    if mask:
        next_states = (mask[:, None] * next_states +
                       (1 - mask[:, None]) * states)
        next_cells = (mask[:, None] * next_cells +
                      (1 - mask[:, None]) * cells)
    return next_states, next_cells
Example #19
Source File: theano_backend.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated
Example #20
Source File: bn.py From attention-lvcsr with MIT License | 6 votes |
def _compute_training_statistics(self, input_):
    axes = (0,) + tuple((i + 1) for i, b in
                        enumerate(self.population_mean.broadcastable)
                        if b)
    mean = input_.mean(axis=axes, keepdims=True)
    assert mean.broadcastable[1:] == self.population_mean.broadcastable
    add_role(mean, BATCH_NORM_MINIBATCH_ESTIMATE)
    if self.mean_only:
        stdev = tensor.ones_like(mean)
    else:
        stdev = tensor.sqrt(
            tensor.var(input_, axis=axes, keepdims=True) +
            numpy.cast[theano.config.floatX](self.epsilon))
        assert (stdev.broadcastable[1:] ==
                self.population_stdev.broadcastable)
        add_role(stdev, BATCH_NORM_MINIBATCH_ESTIMATE)
    return mean, stdev
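When mean_only is set, stdev is pinned to tensor.ones_like(mean), so the later normalization divides by one and only the mean is subtracted. A one-line numpy illustration of that degenerate case:

import numpy as np

x = np.random.rand(8, 4).astype('float32')
mean = x.mean(axis=0, keepdims=True)
stdev = np.ones_like(mean)   # mean-only: no variance scaling
assert np.allclose((x - mean) / stdev, x - mean)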
Example #21
Source File: theano_backend.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta,
                                                reduction_axes, epsilon)

    if gamma is None:
        if beta is None:
            gamma = ones_like(x)
        else:
            gamma = ones_like(beta)
    if beta is None:
        if gamma is None:
            beta = zeros_like(x)
        else:
            beta = zeros_like(gamma)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2)
Example #22
Source File: test_basic_ops.py From attention-lvcsr with MIT License | 6 votes |
def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b],
                        T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b],
                            T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b],
                             T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                             mode=mode_with_gpu)

    assert sum([node.op == T.alloc
                for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join
                for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1

    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
Example #23
Source File: test_basic_ops.py From D-VAE with MIT License | 6 votes |
def test_gpualloc_output_to_gpu():
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    a = tcn.shared_constructor(a_val)
    b = T.fscalar()
    f = theano.function([b], T.ones_like(a) + b, mode=mode_without_gpu)
    f_gpu = theano.function([b], B.gpu_from_host(T.ones_like(a)) + b,
                            mode=mode_with_gpu)

    f(2)
    f_gpu(2)

    assert sum([node.op == T.alloc
                for node in f.maker.fgraph.toposort()]) == 1
    assert sum([node.op == B.gpu_alloc
                for node in f_gpu.maker.fgraph.toposort()]) == 1

    assert numpy.allclose(numpy.ones(a.get_value(borrow=True).shape) + 9,
                          f_gpu(9))
    assert numpy.allclose(f(5), f_gpu(5))
Example #24
Source File: theano_backend.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def ones_like(x, dtype=None, name=None):
    return T.ones_like(x, dtype=dtype)
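This last wrapper simply forwards to T.ones_like; the name argument is accepted but unused, presumably for API compatibility with other Keras backends. A short sketch of the optional dtype override, with illustrative names:

import theano.tensor as T

x = T.fmatrix('x')
y = T.ones_like(x)                 # float32 ones, shape follows x
z = T.ones_like(x, dtype='int32')  # same shape, dtype overridden
print(y.dtype, z.dtype)            # float32 int32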