Python theano.tensor.set_subtensor() Examples
The following are 30 code examples of theano.tensor.set_subtensor(), collected from open-source projects. The project, source file, and license for each example are listed above its code. You may also want to check out all available functions/classes of the module theano.tensor.
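A minimal usage sketch (not taken from any of the projects below): set_subtensor returns a new symbolic variable with the selected slice replaced, since Theano variables cannot be assigned to in place, and inc_subtensor adds to the slice instead.

import numpy as np
import theano
import theano.tensor as T

x = T.fvector('x')
# Replace the first two elements with zeros; x itself is left untouched.
y = T.set_subtensor(x[:2], 0.0)
# Add 10 to the last element of the result.
z = T.inc_subtensor(y[-1], 10.0)

f = theano.function([x], [y, z])
y_val, z_val = f(np.arange(5, dtype='float32'))
print(y_val)  # [ 0.  0.  2.  3.  4.]
print(z_val)  # [ 0.  0.  2.  3. 14.]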
Example #1
Source File: expressions.py From attention-lvcsr with MIT License
def pad_to_a_multiple(tensor_, k, pad_with):
    """Pad a tensor to make its first dimension a multiple of a number.

    Parameters
    ----------
    tensor_ : :class:`~theano.Variable`
    k : int
        The number, a multiple of which the length of the tensor is made.
    pad_with : float or int
        The value for padding.

    """
    new_length = (
        tensor.ceil(tensor_.shape[0].astype('float32') / k) * k).astype('int64')
    new_shape = tensor.set_subtensor(tensor_.shape[:1], new_length)
    canvas = tensor.alloc(pad_with, tensor.prod(new_shape)).reshape(
        new_shape, ndim=tensor_.ndim)
    return tensor.set_subtensor(canvas[:tensor_.shape[0]], tensor_)
Example #2
Source File: layers.py From 3D-R2N2 with MIT License
def set_output(self):
    padding = self._padding
    input_shape = self._input_shape
    if np.sum(self._padding) > 0:
        padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                    input_shape[0],
                                    input_shape[1] + 2 * padding[1],
                                    input_shape[2],
                                    input_shape[3] + 2 * padding[3],
                                    input_shape[4] + 2 * padding[4])

        padded_input = tensor.set_subtensor(
            padded_input[:, padding[1]:padding[1] + input_shape[1], :,
                         padding[3]:padding[3] + input_shape[3],
                         padding[4]:padding[4] + input_shape[4]],
            self._prev_layer.output)
    else:
        padded_input = self._prev_layer.output

    self._output = conv3d2d.conv3d(padded_input, self.W.val) + \
        self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
Example #3
Source File: theano_backend.py From Att-ChemdNER with Apache License 2.0
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Example #4
Source File: hgru4rec.py From hgru4rec with MIT License
def adadelta(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
    v1 = np.float32(self.decay)
    v2 = np.float32(1.0 - self.decay)
    acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    upd = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    if sample_idx is None:
        acc_new = acc + grad ** 2
        updates[acc] = acc_new
        grad = T.sqrt(upd + epsilon) * grad
        upd_new = v1 * upd + v2 * grad ** 2
        updates[upd] = upd_new
    else:
        acc_s = acc[sample_idx]
        acc_new = acc_s + grad ** 2
        updates[acc] = T.set_subtensor(acc_s, acc_new)
        upd_s = upd[sample_idx]
        upd_new = v1 * upd_s + v2 * grad ** 2
        updates[upd] = T.set_subtensor(upd_s, upd_new)
        grad = T.sqrt(upd_s + epsilon) * grad
    gradient_scaling = T.cast(T.sqrt(acc_new + epsilon), theano.config.floatX)
    return grad / gradient_scaling
Example #5
Source File: hgru4rec.py From hgru4rec with MIT License
def adam(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
    v1 = np.float32(self.decay)
    v2 = np.float32(1.0 - self.decay)
    acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    meang = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    countt = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    if sample_idx is None:
        acc_new = v1 * acc + v2 * grad ** 2
        meang_new = v1 * meang + v2 * grad
        countt_new = countt + 1
        updates[acc] = acc_new
        updates[meang] = meang_new
        updates[countt] = countt_new
    else:
        acc_s = acc[sample_idx]
        meang_s = meang[sample_idx]
        countt_s = countt[sample_idx]
        acc_new = v1 * acc_s + v2 * grad ** 2
        meang_new = v1 * meang_s + v2 * grad
        countt_new = countt_s + 1.0
        updates[acc] = T.set_subtensor(acc_s, acc_new)
        updates[meang] = T.set_subtensor(meang_s, meang_new)
        updates[countt] = T.set_subtensor(countt_s, countt_new)
    return (meang_new / (1 - v1 ** countt_new)) / (T.sqrt(acc_new / (1 - v1 ** countt_new)) + epsilon)
Example #6
Source File: layers.py From 3D-R2N2 with MIT License
def set_output(self):
    output_shape = self._output_shape
    padding = self._padding
    unpool_size = self._unpool_size
    unpooled_output = tensor.alloc(0.0,  # Value to fill the tensor
                                   output_shape[0],
                                   output_shape[1] + 2 * padding[0],
                                   output_shape[2],
                                   output_shape[3] + 2 * padding[1],
                                   output_shape[4] + 2 * padding[2])

    unpooled_output = tensor.set_subtensor(
        unpooled_output[:, padding[0]:output_shape[1] + padding[0]:unpool_size[0], :,
                        padding[1]:output_shape[3] + padding[1]:unpool_size[1],
                        padding[2]:output_shape[4] + padding[2]:unpool_size[2]],
        self._prev_layer.output)
    self._output = unpooled_output
Example #7
Source File: layers.py From 3D-R2N2 with MIT License
def set_output(self):
    padding = self._padding
    input_shape = self._input_shape
    padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                input_shape[0],
                                input_shape[1] + 2 * padding[1],
                                input_shape[2],
                                input_shape[3] + 2 * padding[3],
                                input_shape[4] + 2 * padding[4])

    padded_input = tensor.set_subtensor(
        padded_input[:, padding[1]:padding[1] + input_shape[1], :,
                     padding[3]:padding[3] + input_shape[3],
                     padding[4]:padding[4] + input_shape[4]],
        self._prev_layer.output)

    fc_output = tensor.reshape(
        tensor.dot(self._fc_layer.output, self.Wx.val), self._output_shape)
    self._output = conv3d2d.conv3d(padded_input, self.Wh.val) + \
        fc_output + self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
Example #8
Source File: layers.py From 3D-R2N2 with MIT License
def set_output(self):
    padding = self._padding
    input_shape = self._input_shape
    padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                input_shape[0],
                                input_shape[1] + 2 * padding[1],
                                input_shape[2],
                                input_shape[3] + 2 * padding[3],
                                input_shape[4] + 2 * padding[4])

    padded_input = tensor.set_subtensor(
        padded_input[:, padding[1]:padding[1] + input_shape[1], :,
                     padding[3]:padding[3] + input_shape[3],
                     padding[4]:padding[4] + input_shape[4]],
        self._prev_layer.output)

    self._output = conv3d2d.conv3d(padded_input, self.W.val) + \
        self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
Example #9
Source File: __init__.py From adversarial with BSD 3-Clause "New" or "Revised" License
def inpainting_sample_and_noise(self, X, default_input_include_prob=1., default_input_scale=1.):
    # Very hacky! Specifically for inpainting right half of CIFAR-10 given left half
    # assumes X is b01c
    assert X.ndim == 4
    input_space = self.mlp.get_input_space()
    n = input_space.get_total_dimension()
    image_size = input_space.shape[0]
    half_image = int(image_size / 2)
    data_shape = (X.shape[0], image_size, half_image, input_space.num_channels)

    noise = self.theano_rng.normal(size=data_shape, dtype='float32')
    Xg = T.set_subtensor(X[:, :, half_image:, :], noise)
    sampled_part, noise = self.mlp.dropout_fprop(
        Xg, default_input_include_prob=default_input_include_prob,
        default_input_scale=default_input_scale), noise
    sampled_part = sampled_part.reshape(data_shape)
    rval = T.set_subtensor(X[:, :, half_image:, :], sampled_part)
    return rval, noise
Example #10
Source File: test_inc_subtensor.py From D-VAE with MIT License
def test_wrong_broadcast(self):
    a = tt.col()
    increment = tt.vector()

    # These symbolic graphs are legitimate, as long as increment has exactly
    # one element. So it should fail at runtime, not at compile time.
    rng = numpy.random.RandomState(utt.fetch_seed())

    def rng_randX(*shape):
        return rng.rand(*shape).astype(theano.config.floatX)

    for op in (tt.set_subtensor, tt.inc_subtensor):
        for base in (a[:], a[0]):
            out = op(base, increment)
            f = theano.function([a, increment], out)
            # This one should work
            f(rng_randX(3, 1), rng_randX(1))
            # These ones should not
            self.assertRaises(ValueError, f, rng_randX(3, 1), rng_randX(2))
            self.assertRaises(ValueError, f, rng_randX(3, 1), rng_randX(3))
            self.assertRaises(ValueError, f, rng_randX(3, 1), rng_randX(0))
Example #11
Source File: scan_utils.py From D-VAE with MIT License
def expand_empty(tensor_var, size):
    """
    Transforms the shape of a tensor from (d1, d2, ...) to (d1 + size, d2, ...)
    by adding uninitialized memory at the end of the tensor.

    """
    if size == 0:
        return tensor_var
    shapes = [tensor_var.shape[x] for x in xrange(tensor_var.ndim)]
    new_shape = [size + shapes[0]] + shapes[1:]
    empty = tensor.AllocEmpty(tensor_var.dtype)(*new_shape)

    ret = tensor.set_subtensor(empty[:shapes[0]], tensor_var)
    ret.tag.nan_guard_mode_check = False
    return ret
Example #12
Source File: test_basic_ops.py From D-VAE with MIT License
def test_advset_subtensor1_2d():
    """ Test GPU version of set_subtensor on matrices
    (uses GpuAdvancedIncSubtensor1_dev20 if compute capability >= 2.0)
    """
    shp = (10, 5)
    shared = cuda.shared_constructor
    xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1
    idxs = numpy.array([0, 2, 5, 7, 3], dtype='int32')
    yval = numpy.ones((len(idxs), shp[1]), dtype='float32') * 10
    x = shared(xval, name='x')
    y = T.tensor(dtype='float32', broadcastable=(False,) * len(shp), name='y')
    expr = T.advanced_set_subtensor1(x, y, idxs)

    f = theano.function([y], expr, mode=mode_with_gpu)

    assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[idxs] = yval
    utt.assert_allclose(rval, rep)
Example #13
Source File: nbow.py From text_convnet with MIT License
def adagrad_update(self, cost, learning_rate, eps=1e-8):
    params = [p if p != self.slices else self.EMB for p in self.params]
    accumulators = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
                                              dtype=theano.config.floatX))
                    for p in params]
    gparams = [T.grad(cost, param) for param in self.params]
    self.gparams = gparams
    updates = []
    for param, gparam, acc in zip(self.params, gparams, accumulators):
        if param == self.slices:
            acc_slices = acc[self.x.flatten()]
            new_acc_slices = acc_slices + gparam ** 2
            updates.append((acc, T.set_subtensor(acc_slices, new_acc_slices)))
            updates.append((self.EMB, T.inc_subtensor(
                param, -learning_rate * gparam / T.sqrt(new_acc_slices + eps))))
        else:
            new_acc = acc + gparam ** 2
            updates.append((acc, new_acc))
            updates.append((param, param - learning_rate * gparam / T.sqrt(new_acc + eps)))
    return updates
Example #14
Source File: test_basic_ops.py From D-VAE with MIT License
def test_advset_subtensor1():
    """ Test GPU version of set_subtensor on vectors
    (uses GpuAdvancedIncSubtensor1)
    """
    shp = (10,)
    shared = cuda.shared_constructor
    xval = numpy.arange(shp[0], dtype='float32').reshape(shp) + 1
    idxs = numpy.array([0, 2, 5, 7, 3], dtype='int32')
    yval = numpy.ones(len(idxs), dtype='float32') * 10
    x = shared(xval, name='x')
    y = T.tensor(dtype='float32', broadcastable=(False,) * len(shp), name='y')
    expr = T.advanced_set_subtensor1(x, y, idxs)

    f = theano.function([y], expr, mode=mode_with_gpu)

    assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[idxs] = yval
    utt.assert_allclose(rval, rep)
Example #15
Source File: gru4rec.py From sars_tutorial with MIT License
def rmsprop(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
    v1 = np.float32(self.adapt_params[0])
    v2 = np.float32(1.0 - self.adapt_params[0])
    acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    if sample_idx is None:
        acc_new = v1 * acc + v2 * grad ** 2
        updates[acc] = acc_new
    else:
        acc_s = acc[sample_idx]
        # acc_new = v1 * acc_s + v2 * grad ** 2  # Faster, but inaccurate when an index occurs multiple times
        # updates[acc] = T.set_subtensor(acc_s, acc_new)  # Faster, but inaccurate when an index occurs multiple times
        updates[acc] = T.inc_subtensor(T.set_subtensor(acc_s, acc_s * v1)[sample_idx],
                                       v2 * grad ** 2)  # Slower, but accurate when an index occurs multiple times
        acc_new = updates[acc][sample_idx]  # Slower, but accurate when an index occurs multiple times
    gradient_scaling = T.cast(T.sqrt(acc_new + epsilon), theano.config.floatX)
    return grad / gradient_scaling
Example #16
Source File: hgru4rec.py From sars_tutorial with MIT License
def adadelta(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
    v1 = np.float32(self.decay)
    v2 = np.float32(1.0 - self.decay)
    acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    upd = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    if sample_idx is None:
        acc_new = acc + grad ** 2
        updates[acc] = acc_new
        grad = T.sqrt(upd + epsilon) * grad
        upd_new = v1 * upd + v2 * grad ** 2
        updates[upd] = upd_new
    else:
        acc_s = acc[sample_idx]
        acc_new = acc_s + grad ** 2
        updates[acc] = T.set_subtensor(acc_s, acc_new)
        upd_s = upd[sample_idx]
        upd_new = v1 * upd_s + v2 * grad ** 2
        updates[upd] = T.set_subtensor(upd_s, upd_new)
        grad = T.sqrt(upd_s + epsilon) * grad
    gradient_scaling = T.cast(T.sqrt(acc_new + epsilon), theano.config.floatX)
    return grad / gradient_scaling
Example #17
Source File: hgru4rec.py From sars_tutorial with MIT License
def adam(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
    v1 = np.float32(self.decay)
    v2 = np.float32(1.0 - self.decay)
    acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    meang = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    countt = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    if sample_idx is None:
        acc_new = v1 * acc + v2 * grad ** 2
        meang_new = v1 * meang + v2 * grad
        countt_new = countt + 1
        updates[acc] = acc_new
        updates[meang] = meang_new
        updates[countt] = countt_new
    else:
        acc_s = acc[sample_idx]
        meang_s = meang[sample_idx]
        countt_s = countt[sample_idx]
        acc_new = v1 * acc_s + v2 * grad ** 2
        meang_new = v1 * meang_s + v2 * grad
        countt_new = countt_s + 1.0
        updates[acc] = T.set_subtensor(acc_s, acc_new)
        updates[meang] = T.set_subtensor(meang_s, meang_new)
        updates[countt] = T.set_subtensor(countt_s, countt_new)
    return (meang_new / (1 - v1 ** countt_new)) / (T.sqrt(acc_new / (1 - v1 ** countt_new)) + epsilon)
Example #18
Source File: theano_backend.py From GraphicDesignPatternByPython with MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Example #19
Source File: theano_backend.py From GraphicDesignPatternByPython with MIT License
def temporal_padding(x, padding=(1, 1)):
    """Pad the middle dimension of a 3D tensor
    with "padding" zeros left and right.

    Apologies for the inane API, but Theano makes this
    really hard.
    """
    assert len(padding) == 2
    input_shape = x.shape
    output_shape = (input_shape[0],
                    input_shape[1] + padding[0] + padding[1],
                    input_shape[2])
    output = T.zeros(output_shape)
    result = T.set_subtensor(output[:, padding[0]:x.shape[1] + padding[0], :], x)
    if hasattr(x, '_keras_shape'):
        result._keras_shape = (x._keras_shape[0],
                               x._keras_shape[1] + py_sum(padding),
                               x._keras_shape[2])
    return result
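A hypothetical usage sketch for the temporal_padding example above, assuming the function is in scope: a (batch, time, features) input of shape (2, 5, 3) padded with one zero step on each side becomes (2, 7, 3).

import numpy as np
import theano
import theano.tensor as T

x = T.ftensor3('x')
pad_fn = theano.function([x], temporal_padding(x, padding=(1, 1)))
print(pad_fn(np.ones((2, 5, 3), dtype='float32')).shape)  # (2, 7, 3)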
Example #20
Source File: toolbox.py From Theano-Lights with MIT License
def depool(X, factor=2):
    """
    Luke perforated upsample: http://www.brml.org/uploads/tx_sibibtex/281.pdf
    """
    output_shape = [
        X.shape[1],
        X.shape[2] * factor,
        X.shape[3] * factor
    ]
    stride = X.shape[2]
    offset = X.shape[3]
    in_dim = stride * offset
    out_dim = in_dim * factor * factor

    upsamp_matrix = T.zeros((in_dim, out_dim))
    rows = T.arange(in_dim)
    cols = rows * factor + (rows / stride * factor * offset)
    upsamp_matrix = T.set_subtensor(upsamp_matrix[rows, cols], 1.)

    flat = T.reshape(X, (X.shape[0], output_shape[0], X.shape[2] * X.shape[3]))
    up_flat = T.dot(flat, upsamp_matrix)
    upsamp = T.reshape(up_flat, (X.shape[0], output_shape[0],
                                 output_shape[1], output_shape[2]))
    return upsamp
Example #21
Source File: NN_ConvLayer_3D.py From Deep_MRI_brain_extraction with MIT License
def combine_fragments_to_dense_bxcyz(self, tensor, sh):
    """ expected shape: (batch, x, channels, y, z)"""
    ttensor = tensor  # be same shape as result, no significant time cost
    output_stride = self.output_stride
    if isinstance(output_stride, list) or isinstance(output_stride, tuple):
        example_stride = np.prod(output_stride)  # **3
    else:
        example_stride = output_stride ** 3
        output_stride = np.asarray((output_stride,) * 3)
    zero = np.array((0), dtype=theano.config.floatX)
    # The first argument is the fill value (0 in this case) and not an element of the shape.
    embedding = T.alloc(zero, 1, sh[1] * output_stride[0], sh[2],
                        sh[3] * output_stride[1], sh[4] * output_stride[2])
    ix = offset_map(output_stride)
    print " output_stride", output_stride
    print " example_stride", example_stride
    for i, (n, m, k) in enumerate(ix):
        embedding = T.set_subtensor(
            embedding[:, n::output_stride[0], :, m::output_stride[1], k::output_stride[2]],
            ttensor[i::example_stride])
    return embedding
Example #22
Source File: utils.py From RaptorX-Contact with GNU General Public License v3.0
def ConvByPattern(x, patterns, mask=None):
    W = np.transpose(patterns, (3, 0, 1, 2))
    out2 = T.nnet.conv2d(x.dimshuffle(0, 3, 1, 2), W, filter_shape=W.shape, border_mode='half')

    if mask is not None:
        ## mask has shape (batchSize, #rows_to_be_masked, nCols)

        ## a subtensor of out2 along the horiz direction
        out2_sub_horiz = out2[:, :, :mask.shape[1], :]
        mask_horiz = mask.dimshuffle(0, 'x', 1, 2)
        out3 = T.set_subtensor(out2_sub_horiz, T.mul(out2_sub_horiz, mask_horiz))

        ## a subtensor of out3 along the vertical direction
        out3_sub_vertical = out3[:, :, :, :mask.shape[1]]
        mask_vertical = mask.dimshuffle(0, 'x', 2, 1)
        y = T.set_subtensor(out3_sub_vertical, T.mul(out3_sub_vertical, mask_vertical))
    else:
        y = out2

    y = y.dimshuffle(0, 2, 3, 1)

    return y / np.prod(patterns.shape[1:3])
Example #23
Source File: model.py From text_convnet with MIT License
def adagrad_update(self, cost, learning_rate, eps=1e-8):
    params = [p if p != self.slices else self.EMB for p in self.params]
    accumulators = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
                                              dtype=theano.config.floatX))
                    for p in params]
    gparams = [T.grad(cost, param) for param in self.params]
    self.gparams = gparams
    updates = []
    for param, gparam, acc in zip(self.params, gparams, accumulators):
        if param == self.slices:
            acc_slices = acc[self.x.flatten()]
            new_acc_slices = acc_slices + gparam ** 2
            updates.append((acc, T.set_subtensor(acc_slices, new_acc_slices)))
            updates.append((self.EMB, T.inc_subtensor(
                param, -learning_rate * gparam / T.sqrt(new_acc_slices + eps))))
        else:
            new_acc = acc + gparam ** 2
            updates.append((acc, new_acc))
            updates.append((param, param - learning_rate * gparam / T.sqrt(new_acc + eps)))
    return updates
Example #24
Source File: toolbox.py From Theano-Lights with MIT License
def concatenate(tensor_list, axis=0):
    if axis < 0:
        axis += tensor_list[0].ndim

    concat_size = sum(tensor.shape[axis] for tensor in tensor_list)

    output_shape = ()
    for k in range(axis):
        output_shape += (tensor_list[0].shape[k],)
    output_shape += (concat_size,)
    for k in range(axis + 1, tensor_list[0].ndim):
        output_shape += (tensor_list[0].shape[k],)

    out = T.zeros(output_shape)
    offset = 0
    for tensor in tensor_list:
        indices = ()
        for k in range(axis):
            indices += (slice(None),)
        indices += (slice(offset, offset + tensor.shape[axis]),)
        for k in range(axis + 1, tensor_list[0].ndim):
            indices += (slice(None),)

        out = T.set_subtensor(out[indices], tensor)
        offset += tensor.shape[axis]

    return out
Example #25
Source File: toolbox.py From Theano-Lights with MIT License
def theano_one_hot(idxs, n):
    z = T.zeros((idxs.shape[0], n))
    one_hot = T.set_subtensor(z[T.arange(idxs.shape[0]), idxs], 1)
    return one_hot
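A hypothetical usage sketch for theano_one_hot above, assuming the function is in scope: indices [1, 0, 2] with n=4 produce a (3, 4) one-hot matrix.

import numpy as np
import theano
import theano.tensor as T

idxs = T.ivector('idxs')
one_hot_fn = theano.function([idxs], theano_one_hot(idxs, 4))
print(one_hot_fn(np.array([1, 0, 2], dtype='int32')))
# [[ 0.  1.  0.  0.]
#  [ 1.  0.  0.  0.]
#  [ 0.  0.  1.  0.]]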
Example #26
Source File: util.py From gated-graph-transformer-network with MIT License
def categorical_best(tensor):
    """
    tensor should be a tensor of shape (..., categories)

    Return a new tensor of the same shape but one-hot at position of best category
    """
    flat_tensor = tensor.reshape([-1, tensor.shape[-1]])
    argmax_posns = T.argmax(flat_tensor, 1)
    flat_snapped = T.zeros_like(flat_tensor)
    flat_snapped = T.set_subtensor(flat_snapped[T.arange(flat_tensor.shape[0]), argmax_posns], 1.0)
    snapped = flat_snapped.reshape(tensor.shape)
    return snapped
Example #27
Source File: conv.py From iaf with MIT License
def pad2dwithchannel(x, size_kernel):
    assert size_kernel[0] > 1 or size_kernel[1] > 1
    assert size_kernel[0] % 2 == 1
    assert size_kernel[1] % 2 == 1
    a = (size_kernel[0] - 1) / 2
    b = (size_kernel[1] - 1) / 2
    if True:
        n_channels = x.shape[1]
        result_shape = (x.shape[0], x.shape[1] + 1, x.shape[2] + 2 * a, x.shape[3] + 2 * b)
        result = T.zeros(result_shape, dtype=G.floatX)
        result = T.set_subtensor(result[:, n_channels, :, :], 1.)
        result = T.set_subtensor(result[:, n_channels, a:-a, b:-b], 0.)
        result = T.set_subtensor(result[:, :n_channels, a:-a, b:-b], x)
    else:
        # new code, requires that the minibatch size 'x.tag.test_value.shape[0]' is the same during execution
        # I thought this would be more memory-efficient, but seems not the case in practice
        print 'new code, requires that the minibatch size "x.tag.test_value.shape[0]" is the same during execution'
        x_shape = x.tag.test_value.shape
        n_channels = x_shape[1]
        result_shape = (x_shape[0], x_shape[1] + 1, x_shape[2] + 2 * a, x_shape[3] + 2 * b)
        result = np.zeros(result_shape, dtype=G.floatX)
        result[:, n_channels, :, :] = 1.
        result[:, n_channels, a:-a, b:-b] = 0.
        result = T.constant(result)
        result = T.set_subtensor(result[:, :n_channels, a:-a, b:-b], x)
    return result

# Multi-scale conv
Example #28
Source File: conv.py From iaf with MIT License
def pad2d(x, n_padding):
    result_shape = (x.shape[0], x.shape[1], x.shape[2] + 2 * n_padding, x.shape[3] + 2 * n_padding)
    result = T.zeros(result_shape, dtype=G.floatX)
    result = T.set_subtensor(result[:, :, n_padding:-n_padding, n_padding:-n_padding], x)
    return result

# Pad input, add extra channel
Example #29
Source File: conv.py From iaf with MIT License
def upsample2d_perforated(x):
    shape = x.shape
    x = x.reshape((shape[0], shape[1], shape[2], 1, shape[3], 1))
    y = T.zeros((shape[0], shape[1], shape[2], 2, shape[3], 2), dtype=G.floatX)
    x = T.set_subtensor(y[:, :, :, 0:1, :, 0:1], x)
    x = x.reshape((shape[0], shape[1], shape[2] * 2, shape[3] * 2))
    return x

# Pad input
Example #30
Source File: ff_layers.py From GroundHog with BSD 3-Clause "New" or "Revised" License
def fprop(self, var):
    rval = TT.zeros_like(var)
    if self.n > 0:
        rval = TT.set_subtensor(rval[self.n:], var[:-self.n])
    elif self.n < 0:
        rval = TT.set_subtensor(rval[:self.n], var[-self.n:])
    self.out = rval
    return rval