Python theano.tensor.gt() Examples
The following are 30 code examples of theano.tensor.gt(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the theano.tensor module, or try the search function.
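Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what theano.tensor.gt() does: it compares two tensors elementwise and returns an int8 tensor of 0s and 1s, which is often cast to a float type to use as a mask.

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
y = T.vector('y')
mask = T.gt(x, y)  # elementwise x > y; int8 tensor of 0s and 1s
f = theano.function([x, y], mask)

a = np.asarray([1.0, 2.0, 3.0], dtype=theano.config.floatX)
b = np.asarray([2.0, 2.0, 2.0], dtype=theano.config.floatX)
print(f(a, b))  # [0 0 1]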
Example #1
Source File: theano_utils.py From pysaliency with MIT License | 6 votes |
def __init__(self, input, centerbias = None, alpha=1.0):
    self.input = input
    if centerbias is None:
        centerbias = np.ones(12)
    self.alpha = theano.shared(value = np.array(alpha).astype(theano.config.floatX), name='alpha')
    self.centerbias_ys = theano.shared(value=np.array(centerbias, dtype=theano.config.floatX), name='centerbias_ys')
    self.centerbias_xs = theano.shared(value=np.linspace(0, 1, len(centerbias), dtype=theano.config.floatX), name='centerbias_xs')

    height = T.cast(input.shape[0], theano.config.floatX)
    width = T.cast(input.shape[1], theano.config.floatX)
    x_coords = (T.arange(width) - 0.5*width) / (0.5*width)
    y_coords = (T.arange(height) - 0.5*height) / (0.5*height) + 0.0001  # We cannot have zeros in there because of grad

    x_coords = x_coords.dimshuffle('x', 0)
    y_coords = y_coords.dimshuffle(0, 'x')

    dists = T.sqrt(T.square(x_coords) + self.alpha*T.square(y_coords))
    self.max_dist = T.sqrt(1 + self.alpha)
    self.dists = dists/self.max_dist

    self.factors = nonlinearity(self.dists, self.centerbias_xs, self.centerbias_ys, len(centerbias))

    apply_centerbias = T.gt(self.centerbias_ys.shape[0], 2)
    self.output = ifelse(apply_centerbias, self.input+self.factors, self.input)
    self.params = [self.centerbias_ys, self.alpha]
Example #2
Source File: theano_utils.py From pysaliency with MIT License | 6 votes |
def __init__(self, input, centerbias = None, alpha=1.0):
    self.input = input
    if centerbias is None:
        centerbias = np.ones(12)
    self.alpha = theano.shared(value = np.array(alpha).astype(theano.config.floatX), name='alpha')
    self.centerbias_ys = theano.shared(value=np.array(centerbias, dtype=theano.config.floatX), name='centerbias_ys')
    self.centerbias_xs = theano.shared(value=np.linspace(0, 1, len(centerbias), dtype=theano.config.floatX), name='centerbias_xs')

    height = T.cast(input.shape[0], theano.config.floatX)
    width = T.cast(input.shape[1], theano.config.floatX)
    x_coords = (T.arange(width) - 0.5*width) / (0.5*width)
    y_coords = (T.arange(height) - 0.5*height) / (0.5*height) + 0.0001  # We cannot have zeros in there because of grad

    x_coords = x_coords.dimshuffle('x', 0)
    y_coords = y_coords.dimshuffle(0, 'x')

    dists = T.sqrt(T.square(x_coords) + self.alpha*T.square(y_coords))
    self.max_dist = T.sqrt(1 + self.alpha)
    self.dists = dists/self.max_dist

    self.factors = nonlinearity(self.dists, self.centerbias_xs, self.centerbias_ys, len(centerbias))

    apply_centerbias = T.gt(self.centerbias_ys.shape[0], 2)
    self.output = ifelse(apply_centerbias, self.input*self.factors, self.input)
    self.params = [self.centerbias_ys, self.alpha]
Example #3
Source File: test_opt.py From D-VAE with MIT License | 6 votes |
def test_pdbbreakpoint_op():
    """ Test that PdbBreakpoint ops don't block gpu optimization"""
    b = tensor.fmatrix()

    # Create a function composed of a breakpoint followed by
    # some computation
    condition = tensor.gt(b.sum(), 0)
    b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b)
    output = b_monitored ** 2

    f = theano.function([b], output, mode=mode_with_gpu)

    # Ensure that, in the compiled function, the computation following the
    # breakpoint has been moved to the gpu.
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[-2].op, cuda.GpuElemwise)
    assert topo[-1].op == cuda.host_from_gpu
Example #4
Source File: test_basic_ops.py From D-VAE with MIT License | 6 votes |
def test_elemwise_comparaison_cast():
    """
    test if an elemwise comparaison followed by a cast to float32 are
    pushed to gpu.
    """
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
                   (tensor.le, av <= bv), (tensor.ge, av >= bv)]:
        f = pfunc([a, b], tensor.cast(g(a, b), 'float32'), mode=mode_with_gpu)

        out = f(av, bv)
        assert numpy.all(out == ans)
        assert any([isinstance(node.op, cuda.GpuElemwise)
                    for node in f.maker.fgraph.toposort()])
Example #5
Source File: test_opt.py From D-VAE with MIT License | 6 votes |
def test_pdbbreakpoint_op():
    """ Test that PdbBreakpoint ops don't block gpu optimization"""
    b = tensor.fmatrix()

    # Create a function composed of a breakpoint followed by
    # some computation
    condition = tensor.gt(b.sum(), 0)
    b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b)
    output = b_monitored ** 2

    f = theano.function([b], output, mode=mode_with_gpu)

    # Ensure that, in the compiled function, the computation following the
    # breakpoint has been moved to the gpu.
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[-2].op, GpuElemwise)
    assert topo[-1].op == host_from_gpu
Example #6
Source File: test_breakpoint.py From D-VAE with MIT License | 6 votes |
def setUp(self):
    super(TestPdbBreakpoint, self).setUp()

    # Sample computation that involves tensors with different numbers
    # of dimensions
    self.input1 = T.fmatrix()
    self.input2 = T.fscalar()
    self.output = T.dot((self.input1 - self.input2),
                        (self.input1 - self.input2).transpose())

    # Declare the conditional breakpoint
    self.breakpointOp = PdbBreakpoint("Sum of output too high")
    self.condition = T.gt(self.output.sum(), 1000)
    (self.monitored_input1,
     self.monitored_input2,
     self.monitored_output) = self.breakpointOp(self.condition, self.input1,
                                                self.input2, self.output)
Example #7
Source File: test_breakpoint.py From attention-lvcsr with MIT License | 6 votes |
def setUp(self):
    super(TestPdbBreakpoint, self).setUp()

    # Sample computation that involves tensors with different numbers
    # of dimensions
    self.input1 = T.fmatrix()
    self.input2 = T.fscalar()
    self.output = T.dot((self.input1 - self.input2),
                        (self.input1 - self.input2).transpose())

    # Declare the conditional breakpoint
    self.breakpointOp = PdbBreakpoint("Sum of output too high")
    self.condition = T.gt(self.output.sum(), 1000)
    (self.monitored_input1,
     self.monitored_input2,
     self.monitored_output) = self.breakpointOp(self.condition, self.input1,
                                                self.input2, self.output)
Example #8
Source File: test_opt.py From attention-lvcsr with MIT License | 6 votes |
def test_pdbbreakpoint_op():
    """ Test that PdbBreakpoint ops don't block gpu optimization"""
    b = tensor.fmatrix()

    # Create a function composed of a breakpoint followed by
    # some computation
    condition = tensor.gt(b.sum(), 0)
    b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b)
    output = b_monitored ** 2

    f = theano.function([b], output, mode=mode_with_gpu)

    # Ensure that, in the compiled function, the computation following the
    # breakpoint has been moved to the gpu.
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[-2].op, GpuElemwise)
    assert topo[-1].op == host_from_gpu
Example #9
Source File: test_basic_ops.py From attention-lvcsr with MIT License | 6 votes |
def test_elemwise_comparaison_cast():
    """
    test if an elemwise comparaison followed by a cast to float32 are
    pushed to gpu.
    """
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
                   (tensor.le, av <= bv), (tensor.ge, av >= bv)]:
        f = pfunc([a, b], tensor.cast(g(a, b), 'float32'), mode=mode_with_gpu)

        out = f(av, bv)
        assert numpy.all(out == ans)
        assert any([isinstance(node.op, cuda.GpuElemwise)
                    for node in f.maker.fgraph.toposort()])
Example #10
Source File: theano_backend.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def relu(x, alpha=0., max_value=None, threshold=0.):
    _assert_has_capability(T.nnet, 'relu')

    if alpha != 0.:
        if threshold != 0.:
            negative_part = T.nnet.relu(-x + threshold)
        else:
            negative_part = T.nnet.relu(-x)

    if threshold != 0.:
        x = x * T.cast(T.gt(x, threshold), floatx())
    else:
        x = T.nnet.relu(x)

    if max_value is not None:
        x = T.clip(x, 0.0, max_value)

    if alpha != 0.:
        x -= alpha * negative_part

    return x
Example #11
Source File: test_opt.py From attention-lvcsr with MIT License | 6 votes |
def test_pdbbreakpoint_op():
    """ Test that PdbBreakpoint ops don't block gpu optimization"""
    b = tensor.fmatrix()

    # Create a function composed of a breakpoint followed by
    # some computation
    condition = tensor.gt(b.sum(), 0)
    b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b)
    output = b_monitored ** 2

    f = theano.function([b], output, mode=mode_with_gpu)

    # Ensure that, in the compiled function, the computation following the
    # breakpoint has been moved to the gpu.
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[-2].op, cuda.GpuElemwise)
    assert topo[-1].op == cuda.host_from_gpu
Example #12
Source File: smthact.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def basisf(self, x, s, e):
    cpstart = T.le(s, x);
    cpend = T.gt(e, x);
    return 0 * (1 - cpstart) + 0.5 * (x - s)**2 * cpstart * cpend + ((e - s) * (x - e) + 0.5 * (e - s)**2) * (1 - cpend);
Example #13
Source File: smthact_new.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_output_for(self, input, **kwargs):
    output = input * T.gt(input, 0);
    for seg in range(0, self.num_segs):
        if self.tied_feamap:
            output += self.basisf(input, T.shape_padleft(T.shape_padright(self.P[seg], n_ones = len(input_dim) - 2))) \
                    * T.shape_padleft(T.shape_padright(self.W[seg], n_ones = len(input_dim) - 2));
        else:
            output += self.basisf(input, T.shape_padleft(self.P[seg])) \
                    * T.shape_padleft(self.W[seg]);
    return output;
Example #14
Source File: smthact_new.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_output_for(self, input, **kwargs):
    if self.tied_feamap:
        return input * T.gt(input, 0) + input * T.le(input, 0) \
            * T.shape_padleft(T.shape_padright(self.W[seg], n_ones = len(input_dim) - 2));
    else:
        return input * T.gt(input, 0) + input * T.le(input, 0) \
            * T.shape_padleft(self.W);
Example #15
Source File: smthact.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def basisf(self, x, s, e):
    cpstart = T.le(s, x);
    cpend = T.gt(e, x);
    return 0 * (1 - cpstart) + (x - s) * cpstart * cpend + (e - s) * (1 - cpend);
Example #16
Source File: smthact.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_output_for(self, input, **kwargs):
    return input * T.gt(input, 0) + input * T.le(input, 0) * self.W;
Example #17
Source File: smthact.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_output_for(self, input, **kwargs):
    output = input * T.gt(input, 0);
    for seg in range(0, self.num_segs):
        output += self.basisf(input, self.P[seg, :]) * self.W[seg, :];
    return output;
Example #18
Source File: smthact.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_output_for(self, input, **kwargs):
    return input * T.gt(input, 0) + input * T.le(input, 0) * T.shape_padleft(self.W, n_ones=1);
Example #19
Source File: smthact_new.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def basisf(self, x, s, e):
    cpstart = T.le(s, x);
    cpend = T.gt(e, x);
    if self.order == 1:
        return 0 * (1 - cpstart) + (x - s) * cpstart * cpend + (e - s) * (1 - cpend);
    else:
        return 0 * (1 - cpstart) + 0.5 * (x - s)**2 * cpstart * cpend + ((e - s) * (x - e) + 0.5 * (e - s)**2) * (1 - cpend);
Example #20
Source File: smthact_new.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def basisf(self, x, bks):
    act_bks = T.gt(x, bks);
    return x * act_bks;
Example #21
Source File: smthact.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_output_for(self, input, **kwargs):
    return input * T.gt(input, 0) + input * T.le(input, 0) * T.shape_padleft(self.W, n_ones=1);
Example #22
Source File: smthact_new.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def basisf(self, x, bks):
    act_bks = T.gt(x, bks);
    return x * act_bks;
Example #23
Source File: smthact.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def basisf(self, x, s, e):
    cpstart = T.le(s, x);
    cpend = T.gt(e, x);
    return 0 * (1 - cpstart) + 0.5 * (x - s)**2 * cpstart * cpend + ((e - s) * (x - e) + 0.5 * (e - s)**2) * (1 - cpend);
Example #24
Source File: smthact.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def basisf(self, x, s, e):
    cpstart = T.le(s, x);
    cpend = T.gt(e, x);
    return 0 * (1 - cpstart) + (x - s) * cpstart * cpend + (e - s) * (1 - cpend);
Example #25
Source File: smthact.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_output_for(self, input, **kwargs):
    return input * T.gt(input, 0) + input * T.le(input, 0) * T.shape_padleft(self.W, n_ones=1);
Example #26
Source File: smthact.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_output_for(self, input, **kwargs):
    output = input * T.gt(input, 0);
    for seg in range(0, self.num_segs):
        output += self.basisf(input, self.P[seg, :]) * self.W[seg, :];
    return output;
Example #27
Source File: smthact.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def basisf(self, x, bks):
    act_bks = T.gt(x, bks);
    return x * act_bks;
Example #28
Source File: smthact.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def basisf(self, x, s, e):
    cpstart = T.le(s, x);
    cpend = T.gt(e, x);
    return 0 * (1 - cpstart) + (x - s) * cpstart * cpend + (e - s) * (1 - cpend);
Example #29
Source File: smthact_new.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_output_for(self, input, **kwargs):
    if self.tied_feamap:
        return input * T.gt(input, 0) + input * T.le(input, 0) \
            * T.shape_padleft(T.shape_padright(self.W[seg], n_ones = len(input_dim) - 2));
    else:
        return input * T.gt(input, 0) + input * T.le(input, 0) \
            * T.shape_padleft(self.W);
Example #30
Source File: smthact_new.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_output_for(self, input, **kwargs):
    output = input * T.gt(input, 0);
    for seg in range(0, self.num_segs):
        if self.tied_feamap:
            output += self.basisf(input, T.shape_padleft(T.shape_padright(self.P[seg], n_ones = len(input_dim) - 2))) \
                    * T.shape_padleft(T.shape_padright(self.W[seg], n_ones = len(input_dim) - 2));
        else:
            output += self.basisf(input, T.shape_padleft(self.P[seg])) \
                    * T.shape_padleft(self.W[seg]);
    return output;