Python theano.tensor.reshape() Examples
The following are 30 code examples of theano.tensor.reshape().
You may also want to check out all available functions/classes of the module theano.tensor, or try the search function.
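
As a quick orientation before the project examples, the short sketch below shows the two common forms of reshape used in the snippets that follow: the free function T.reshape(x, shape) and the tensor method x.reshape(shape), where -1 lets Theano infer one dimension. The variable names and shapes here are illustrative only and are not taken from any of the projects listed.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                                   # symbolic 2-D input
flat = T.reshape(x, (x.shape[0] * x.shape[1],))     # function form: flatten to a vector
pairs = x.reshape((-1, 2))                          # method form: -1 infers the leading dimension

f = theano.function([x], [flat, pairs])
flat_val, pairs_val = f(np.arange(6, dtype=theano.config.floatX).reshape(3, 2))
print(flat_val.shape, pairs_val.shape)              # (6,) (3, 2)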
Example #1
Source File: layers_theano.py From visual_dynamics with MIT License | 6 votes |
def get_output_for(self, inputs, **kwargs):
    Y, U = inputs
    if Y.ndim > (self.axis + 1):
        Y = Y.flatten(self.axis + 1)
    assert Y.ndim == self.axis + 1
    outer_YU = Y.dimshuffle(list(range(Y.ndim)) + ['x']) * U.dimshuffle([0] + ['x'] * self.axis + [1])
    bilinear = T.dot(outer_YU.reshape((-1, self.y_dim * self.u_dim)),
                     self.Q.reshape((self.y_dim, self.y_dim * self.u_dim)).T)
    if self.axis > 1:
        bilinear = bilinear.reshape((-1,) + self.y_shape[:self.axis - 1] + (self.y_dim,))
    linear_u = T.dot(U, self.R.T)
    if self.axis > 1:
        linear_u = linear_u.dimshuffle([0] + ['x'] * (self.axis - 1) + [1])
    linear_y = T.dot(Y, self.S.T)
    if self.axis > 1:
        linear_y = linear_y.reshape((-1,) + self.y_shape[:self.axis - 1] + (self.y_dim,))
    activation = bilinear + linear_u + linear_y
    if self.b is not None:
        activation += self.b.dimshuffle(['x'] * self.axis + [0])
    activation = activation.reshape((-1,) + self.y_shape)
    return activation
Example #2
Source File: transform_rnn.py From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License | 6 votes |
def call(self, x, training=None):
    deta1 = 0.3
    deta2 = 0.3
    deta3 = 0.3
    seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    theta1 = rng.uniform(size=(x.shape[0], 1), low=-deta1, high=deta1, dtype='float32')
    theta2 = rng.uniform(size=(x.shape[0], 1), low=-deta2, high=deta2, dtype='float32')
    theta3 = rng.uniform(size=(x.shape[0], 1), low=-deta3, high=deta3, dtype='float32')
    theta = K.concatenate([theta1, theta2, theta3], axis=-1)
    theta = K.tile(theta, x.shape[1])
    theta = theta.reshape((x.shape[0], x.shape[1], 3))
    theta = theta.reshape((theta.shape[0] * theta.shape[1], theta.shape[2]))
    M = _fusion(theta)
    output = _transform_rot(M, x)
    return K.in_train_phase(output, x, training=training)
Example #3
Source File: test_basic_ops.py From attention-lvcsr with MIT License | 6 votes |
def test_advinc_subtensor1():
    """ Test the second case in the opt local_gpu_advanced_incsubtensor1 """
    for shp in [(3, 3), (3, 3, 3)]:
        shared = cuda.shared_constructor
        xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1
        yval = numpy.empty((2,) + shp[1:], dtype='float32')
        yval[:] = 10
        x = shared(xval, name='x')
        y = T.tensor(dtype='float32',
                     broadcastable=(False,) * len(shp),
                     name='y')
        expr = T.advanced_inc_subtensor1(x, y, [0, 2])
        f = theano.function([y], expr, mode=mode_with_gpu)
        assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                    for node in f.maker.fgraph.toposort()]) == 1
        rval = f(yval)
        rep = xval.copy()
        rep[[0, 2]] += yval
        utt.assert_allclose(rval, rep)
Example #4
Source File: test_basic_ops.py From attention-lvcsr with MIT License | 6 votes |
def test_elemwise_collapse6():
    """ Test when all inputs have two broadcastable dimension at the beginning"""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 'x', 0, 1)
    b = tcn.CudaNdarrayType((True, True, False, False))()
    f = pfunc([b], [a3 + b], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(1, 1, shape[0], shape[1]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v)
    # print "Expected collapse to c contiguous"
Example #5
Source File: test_basic_ops.py From attention-lvcsr with MIT License | 6 votes |
def test_elemwise_collapse4():
    """ Test when only one inputs have two broadcastable dimension at each ends and we add a scalar"""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b + 2)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v + 2)
    # print "Expected collapse to 3 dimensions"
Example #6
Source File: theano_backend.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
    data_format = normalize_data_format(data_format)

    stride = strides[0]
    kernel_shape = int_shape(kernel)
    output_length, feature_dim, filters = kernel_shape

    xs = []
    for i in range(output_length):
        slice_length = py_slice(i * stride,
                                i * stride + kernel_size[0])
        xs.append(reshape(inputs[:, slice_length, :],
                          (1, -1, feature_dim)))
    x_aggregate = concatenate(xs, axis=0)
    # Shape: `(output_length, batch_size, filters)`.
    output = batch_dot(x_aggregate, kernel)
    return permute_dimensions(output, (1, 0, 2))
Example #7
Source File: transform_rnn.py From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License | 6 votes |
def call(self, x, mask=None):
    conv_input, theta = x
    s = theta.shape
    theta = T.reshape(theta, [-1, s[2]])
    m = K.not_equal(conv_input, 0.)

    #### For translation
    trans = _trans(theta)
    output = _transform_trans(trans, conv_input)
    output = output * K.cast(m, K.floatx())

    ### For rotation
    M = _fusion(theta)
    output = _transform_rot(M, output)
    return output
Example #8
Source File: test_basic_ops.py From attention-lvcsr with MIT License | 6 votes |
def test_elemwise_collapse3():
    """ Test when only one inputs have two broadcastable dimension at each ends """
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v)
    # print "Expected collapse to 3 dimensions"
Example #9
Source File: test_basic_ops.py From attention-lvcsr with MIT License | 6 votes |
def test_elemwise_collapse():
    """ Test when all inputs have one(and the same) broadcastable dimension """
    shape = (4, 5, 60)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, True, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(shape[0], 1, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    # print "Expected collapse of all dimensions"
Example #10
Source File: test_basic_ops.py From attention-lvcsr with MIT License | 6 votes |
def test_elemwise_collapse2():
    """ Test when only one inputs have one broadcastable dimension """
    shape = (4, 5, 9)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(shape[0], 5, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    # print "Expected collapse to 3 dimensions"
Example #11
Source File: test_basic_ops.py From attention-lvcsr with MIT License | 6 votes |
def test_elemwise_collapse7(atol=1e-6):
    """ Test when one input have one broadcastable dimension and the other is a scalar"""
    shape = (5, 4, 1)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a.copy(), 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    f = pfunc([], [a3 + 2], mode=mode_with_gpu)

    # let debugmode catch errors
    out = f()[0]
    ans = (a + 2).reshape(shape[0], 1, shape[1], shape[2])
    assert numpy.allclose(out, ans, atol=atol)
    # print "Expected collapse to c contiguous"
Example #12
Source File: layers.py From 3D-R2N2 with MIT License | 6 votes |
def set_output(self):
    padding = self._padding
    input_shape = self._input_shape
    padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                input_shape[0],
                                input_shape[1] + 2 * padding[1],
                                input_shape[2],
                                input_shape[3] + 2 * padding[3],
                                input_shape[4] + 2 * padding[4])

    padded_input = tensor.set_subtensor(
        padded_input[:, padding[1]:padding[1] + input_shape[1], :,
                     padding[3]:padding[3] + input_shape[3],
                     padding[4]:padding[4] + input_shape[4]],
        self._prev_layer.output)

    fc_output = tensor.reshape(
        tensor.dot(self._fc_layer.output, self.Wx.val), self._output_shape)
    self._output = conv3d2d.conv3d(padded_input, self.Wh.val) + \
        fc_output + self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
Example #13
Source File: toolbox.py From Theano-Lights with MIT License | 6 votes |
def depool(X, factor=2):
    """
    Luke perforated upsample: http://www.brml.org/uploads/tx_sibibtex/281.pdf
    """
    output_shape = [
        X.shape[1],
        X.shape[2] * factor,
        X.shape[3] * factor
    ]
    stride = X.shape[2]
    offset = X.shape[3]
    in_dim = stride * offset
    out_dim = in_dim * factor * factor

    upsamp_matrix = T.zeros((in_dim, out_dim))
    rows = T.arange(in_dim)
    cols = rows * factor + (rows / stride * factor * offset)
    upsamp_matrix = T.set_subtensor(upsamp_matrix[rows, cols], 1.)

    flat = T.reshape(X, (X.shape[0], output_shape[0], X.shape[2] * X.shape[3]))
    up_flat = T.dot(flat, upsamp_matrix)
    upsamp = T.reshape(up_flat, (X.shape[0], output_shape[0],
                                 output_shape[1], output_shape[2]))

    return upsamp
Example #14
Source File: test_basic_ops.py From D-VAE with MIT License | 6 votes |
def test_elemwise_collapse():
    """ Test when all inputs have one(and the same) broadcastable dimension """
    shape = (4, 5, 60)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, True, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(shape[0], 1, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    # print "Expected collapse of all dimensions"
Example #15
Source File: test_basic_ops.py From attention-lvcsr with MIT License | 6 votes |
def test_advset_subtensor1():
    """ Test GPU version of set_subtensor on vectors (uses GpuAdvancedIncSubtensor1) """
    shp = (10,)
    shared = cuda.shared_constructor
    xval = numpy.arange(shp[0], dtype='float32').reshape(shp) + 1
    idxs = numpy.array([0, 2, 5, 7, 3], dtype='int32')
    yval = numpy.ones(len(idxs), dtype='float32') * 10
    x = shared(xval, name='x')
    y = T.tensor(dtype='float32',
                 broadcastable=(False,) * len(shp),
                 name='y')
    expr = T.advanced_set_subtensor1(x, y, idxs)
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[idxs] = yval
    utt.assert_allclose(rval, rep)
Example #16
Source File: layers_theano.py From visual_dynamics with MIT License | 6 votes |
def get_output_for(self, inputs, **kwargs):
    Y, U = inputs
    Y = Y.flatten(3)
    outer_YU = Y.dimshuffle([0, 1, 2, 'x']) * U.dimshuffle([0, 'x', 'x', 1])
    bilinear, _ = theano.scan(
        fn=lambda Q, outer_YU2: T.dot(outer_YU2, Q.T),
        sequences=[self.Q.reshape((self.c_dim, self.y_dim, self.y_dim * self.u_dim)),
                   outer_YU.dimshuffle([1, 0, 2, 3]).reshape((self.c_dim, -1, self.y_dim * self.u_dim))])
    linear_u, _ = theano.scan(fn=lambda R, U2: T.dot(U2, R.T),
                              sequences=[self.R], non_sequences=U)
    linear_y, _ = theano.scan(fn=lambda S, Y2: T.dot(Y2, S.T),
                              sequences=[self.S, Y.dimshuffle([1, 0, 2])])
    activation = bilinear + linear_u + linear_y
    if self.b is not None:
        activation += self.b.dimshuffle([0, 'x', 1])
    activation = activation.dimshuffle([1, 0, 2]).reshape((-1,) + self.y_shape)
    return activation
Example #17
Source File: highway.py From LasagneNLP with Apache License 2.0 | 6 votes |
def get_output_for(self, input, **kwargs):
    # if the input has more than two dimensions, flatten it into a
    # batch of feature vectors.
    input_reshape = input.flatten(2) if input.ndim > 2 else input

    activation = T.dot(input_reshape, self.W_h)
    if self.b_h is not None:
        activation = activation + self.b_h.dimshuffle('x', 0)
    activation = self.nonlinearity(activation)

    transform = T.dot(input_reshape, self.W_t)
    if self.b_t is not None:
        transform = transform + self.b_t.dimshuffle('x', 0)
    transform = nonlinearities.sigmoid(transform)

    carry = 1.0 - transform
    output = activation * transform + input_reshape * carry

    # reshape output back to original input_shape
    if input.ndim > 2:
        output = T.reshape(output, input.shape)
    return output
Example #18
Source File: test_basic_ops.py From D-VAE with MIT License | 6 votes |
def test_advset_subtensor1_2d():
    """ Test GPU version of set_subtensor on matrices (uses GpuAdvancedIncSubtensor1_dev20 if compute capability >= 2.0) """
    shp = (10, 5)
    shared = cuda.shared_constructor
    xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1
    idxs = numpy.array([0, 2, 5, 7, 3], dtype='int32')
    yval = numpy.ones((len(idxs), shp[1]), dtype='float32') * 10
    x = shared(xval, name='x')
    y = T.tensor(dtype='float32',
                 broadcastable=(False,) * len(shp),
                 name='y')
    expr = T.advanced_set_subtensor1(x, y, idxs)
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[idxs] = yval
    utt.assert_allclose(rval, rep)
Example #19
Source File: test_basic_ops.py From D-VAE with MIT License | 6 votes |
def test_advset_subtensor1():
    """ Test GPU version of set_subtensor on vectors (uses GpuAdvancedIncSubtensor1) """
    shp = (10,)
    shared = cuda.shared_constructor
    xval = numpy.arange(shp[0], dtype='float32').reshape(shp) + 1
    idxs = numpy.array([0, 2, 5, 7, 3], dtype='int32')
    yval = numpy.ones(len(idxs), dtype='float32') * 10
    x = shared(xval, name='x')
    y = T.tensor(dtype='float32',
                 broadcastable=(False,) * len(shp),
                 name='y')
    expr = T.advanced_set_subtensor1(x, y, idxs)
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[idxs] = yval
    utt.assert_allclose(rval, rep)
Example #20
Source File: test_basic_ops.py From D-VAE with MIT License | 6 votes |
def test_advinc_subtensor1():
    """ Test the second case in the opt local_gpu_advanced_incsubtensor1 """
    for shp in [(3, 3), (3, 3, 3)]:
        shared = cuda.shared_constructor
        xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1
        yval = numpy.empty((2,) + shp[1:], dtype='float32')
        yval[:] = 10
        x = shared(xval, name='x')
        y = T.tensor(dtype='float32',
                     broadcastable=(False,) * len(shp),
                     name='y')
        expr = T.advanced_inc_subtensor1(x, y, [0, 2])
        f = theano.function([y], expr, mode=mode_with_gpu)
        assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                    for node in f.maker.fgraph.toposort()]) == 1
        rval = f(yval)
        rep = xval.copy()
        rep[[0, 2]] += yval
        utt.assert_allclose(rval, rep)
Example #21
Source File: test_basic_ops.py From D-VAE with MIT License | 6 votes |
def test_elemwise_collapse7(atol=1e-6):
    """ Test when one input have one broadcastable dimension and the other is a scalar"""
    shape = (5, 4, 1)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a.copy(), 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    f = pfunc([], [a3 + 2], mode=mode_with_gpu)

    # let debugmode catch errors
    out = f()[0]
    ans = (a + 2).reshape(shape[0], 1, shape[1], shape[2])
    assert numpy.allclose(out, ans, atol=atol)
    # print "Expected collapse to c contiguous"
Example #22
Source File: test_basic_ops.py From D-VAE with MIT License | 6 votes |
def test_elemwise_collapse6():
    """ Test when all inputs have two broadcastable dimension at the beginning"""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 'x', 0, 1)
    b = tcn.CudaNdarrayType((True, True, False, False))()
    f = pfunc([b], [a3 + b], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(1, 1, shape[0], shape[1]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v)
    # print "Expected collapse to c contiguous"
Example #23
Source File: test_basic_ops.py From D-VAE with MIT License | 6 votes |
def test_elemwise_collapse4():
    """ Test when only one inputs have two broadcastable dimension at each ends and we add a scalar"""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b + 2)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v + 2)
    # print "Expected collapse to 3 dimensions"
Example #24
Source File: test_basic_ops.py From D-VAE with MIT License | 6 votes |
def test_elemwise_collapse3():
    """ Test when only one inputs have two broadcastable dimension at each ends """
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v)
    # print "Expected collapse to 3 dimensions"
Example #25
Source File: test_basic_ops.py From D-VAE with MIT License | 6 votes |
def test_elemwise_collapse2():
    """ Test when only one inputs have one broadcastable dimension """
    shape = (4, 5, 9)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(shape[0], 5, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    # print "Expected collapse to 3 dimensions"
Example #26
Source File: toolbox.py From Theano-Lights with MIT License | 6 votes |
def downsample(data):
    data['_tr_X'] = np.zeros((len(data['tr_X']), 14 * 14), dtype='float32')
    data['_va_X'] = np.zeros((len(data['va_X']), 14 * 14), dtype='float32')
    data['_te_X'] = np.zeros((len(data['te_X']), 14 * 14), dtype='float32')

    for i in xrange(0, len(data['tr_X'])):
        data['_tr_X'][i] = block_reduce(data['tr_X'][i].reshape(data['shape_x']),
                                        block_size=(2, 2), func=np.mean).flatten()
    for i in xrange(0, len(data['va_X'])):
        data['_va_X'][i] = block_reduce(data['va_X'][i].reshape(data['shape_x']),
                                        block_size=(2, 2), func=np.mean).flatten()
    for i in xrange(0, len(data['te_X'])):
        data['_te_X'][i] = block_reduce(data['te_X'][i].reshape(data['shape_x']),
                                        block_size=(2, 2), func=np.mean).flatten()

    data['tr_X'] = data['_tr_X']
    data['va_X'] = data['_va_X']
    data['te_X'] = data['_te_X']
    data['shape_x'] = (14, 14)
    data['n_x'] = 14 * 14

    return data
Example #27
Source File: layers_theano.py From visual_dynamics with MIT License | 5 votes |
def convolve(self, input, **kwargs):
    W_shape = self.get_W_shape()
    W_shape = (W_shape[0], W_shape[1] * self.groups, W_shape[2], W_shape[3])
    # the following is the symbolic equivalent of
    # W = np.zeros(W_shape)
    # for g in range(self.groups):
    #     input_slice = slice(g * self.input_shape[1] // self.groups,
    #                         (g + 1) * self.input_shape[1] // self.groups)
    #     output_slice = slice(g * self.num_filters // self.groups, (g + 1) * self.num_filters // self.groups)
    #     W[output_slice, input_slice, :, :] = self.W.get_value()[output_slice, :, :, :]

    # repeat W across the second dimension and then mask the terms outside the block diagonals
    mask = np.zeros(W_shape[:2]).astype(theano.config.floatX)
    for g in range(self.groups):
        input_slice = slice(g * self.input_shape[1] // self.groups,
                            (g + 1) * self.input_shape[1] // self.groups)
        output_slice = slice(g * self.num_filters // self.groups, (g + 1) * self.num_filters // self.groups)
        mask[output_slice, input_slice] = 1

    # elementwise multiplication along broadcasted dimensions is faster than T.tile
    # the following is equivalent to
    # W = T.tile(self.W, (1, self.groups, 1, 1)) * mask[:, :, None, None]
    W = (T.ones((1, self.groups, 1, 1, 1)) * self.W[:, None, :, :, :]).reshape(W_shape) * mask[:, :, None, None]
    # similarly for T.repeat but we don't use that in here
    # W = T.repeat(self.W, self.groups, axis=1) * mask[:, :, None, None]
    # W = (T.ones((1, 1, self.groups, 1, 1)) * self.W[:, :, None, :, :]).reshape(W_shape) * mask[:, :, None, None]

    border_mode = 'half' if self.pad == 'same' else self.pad
    conved = self.convolution(input, W, self.input_shape, W_shape,
                              subsample=self.stride,
                              filter_dilation=self.filter_dilation,
                              border_mode=border_mode,
                              filter_flip=self.flip_filters)
    return conved
Example #28
Source File: servoing_policy.py From visual_dynamics with MIT License | 5 votes |
def objective(self, obs, action, preprocessed=False):
    """
    The following should be true if the predictor of the next feature is linear

    objectives = [self.objective(state, action) for (state, action) in zip(states, actions)]
    linearized_objectives = [self.linearized_objective(state, action) for (state, action) in zip(states, actions)]
    assert np.allclose(objectives, linearized_objectives)
    """
    if self.w.shape != (len(self.repeats),):
        raise NotImplementedError
    assert isinstance(obs, dict)
    image = obs[self.image_name]
    target_image = obs[self.target_image_name]

    features = self.predictor.feature(np.array([image, target_image]), preprocessed=preprocessed)
    y, y_target = np.concatenate([f.reshape((f.shape[0], -1)) for f in features], axis=1)
    if self.alpha != 1.0:
        y_target = self.alpha * y_target + (1 - self.alpha) * y

    # unlike in the other methods, the next feature in here uses the action action
    next_feature = self.predictor.next_feature(image, action, preprocessed=preprocessed)
    y_next_pred = np.concatenate([f.flatten() for f in next_feature])

    if preprocessed:
        u = action
    else:
        u = self.action_transformer.preprocess(action)
    value = np.repeat(self.w / self.repeats, self.repeats).dot((y_target - y_next_pred) ** 2) + self.lambda_.dot(u ** 2)
    return value
Example #29
Source File: layers_theano.py From visual_dynamics with MIT License | 5 votes |
def get_output_for(self, input, **kwargs):
    a, b = self.scale_factor
    input_shape = input.shape
    downscaled = input.reshape((input_shape[0],
                                input_shape[1],
                                input_shape[2] // a, a,
                                input_shape[3] // b, b)).mean(axis=(-3, -1))
    return downscaled
Example #30
Source File: theano_backend.py From Att-ChemdNER with Apache License 2.0 | 5 votes |
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    '''Computes mean and std for batch then apply batch_normalization on batch.
    '''
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO remove this if statement when Theano without
# T.nnet.bn.batch_normalization_test is deprecated