Python theano.tensor.dot() Examples

The following are 30 code examples of theano.tensor.dot(), extracted from open source projects. Each example lists the project and source file it was taken from. You may also want to check out all available functions and classes of the module theano.tensor, or try the search function.
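Before the project examples, here is a minimal, self-contained sketch of what theano.tensor.dot() does (variable names are illustrative): it builds a symbolic matrix product, which theano.function then compiles into a callable.

import numpy
import theano
import theano.tensor as T

# Declare symbolic matrices and build a symbolic matrix product.
A = T.dmatrix('A')
B = T.dmatrix('B')
C = T.dot(A, B)

# Compile the graph and evaluate it on concrete arrays.
f = theano.function([A, B], C)
a = numpy.array([[1., 2.], [3., 4.]])
b = numpy.array([[5., 6.], [7., 8.]])
print(f(a, b))  # same result as numpy.dot(a, b)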
Example #1
Source File: skipthoughts.py    From text-to-image with MIT License
def nn(model, text, vectors, query, k=5):
	"""
	Return the nearest neighbour sentences to query
	text: list of sentences
	vectors: the corresponding representations for text
	query: a string to search
	"""
	qf = encode(model, [query])
	qf /= norm(qf)
	scores = numpy.dot(qf, vectors.T).flatten()
	sorted_args = numpy.argsort(scores)[::-1]
	sentences = [text[a] for a in sorted_args[:k]]
	print('QUERY: ' + query)
	print('NEAREST: ')
	for i, s in enumerate(sentences):
		print(s, sorted_args[i])
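The scoring step above is a cosine similarity: `qf` is explicitly normalized, and skip-thoughts vectors are typically unit-normalized as well. A minimal NumPy sketch of the same ranking logic, with made-up data (shapes are illustrative):

import numpy

rng = numpy.random.RandomState(0)
vectors = rng.randn(100, 4800)                   # 100 sentence vectors
vectors /= numpy.linalg.norm(vectors, axis=1, keepdims=True)
qf = vectors[3:4] + 0.01 * rng.randn(1, 4800)    # a query close to sentence 3
qf /= numpy.linalg.norm(qf)

scores = numpy.dot(qf, vectors.T).flatten()      # cosine similarities
top_k = numpy.argsort(scores)[::-1][:5]          # indices of the 5 nearest
print(top_k)                                     # sentence 3 should rank first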
Example #2
Source File: test_stack.py    From spinn with MIT License
def setUp(self):
        if 'gpu' not in theano.config.device:
            raise RuntimeError("Thin stack only defined for GPU usage")

        self.embedding_dim = 10
        self.model_dim = 20
        self.vocab_size = 5
        self.batch_size = 2
        self.num_classes = 2

        self.vs = VariableStore()

        def compose_network(children, hidden, *args, **kwargs):
            c1, c2 = children
            conc = T.concatenate([hidden, c1, c2], axis=1)

            W = self.vs.add_param("W", (self.model_dim // 2 + self.model_dim * 2, self.model_dim))
            b = self.vs.add_param("b", (self.model_dim,),
                                  initializer=util.ZeroInitializer())
            return T.dot(conc, W) + b
Example #3
Source File: nn.py    From Att-ChemdNER with Apache License 2.0
def step(self, x, h_tm1, c_tm1):
#{{{
        z = K.dot(x, self.W) + K.dot(h_tm1, self.U) + self.b
        if self.with_batch:
            z0 = z[:, :self.output_dim]
            z1 = z[:, self.output_dim: 2 * self.output_dim]
            z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]
            z3 = z[:, 3 * self.output_dim:]
        else:
            z0 = z[:self.output_dim]
            z1 = z[self.output_dim: 2 * self.output_dim]
            z2 = z[2 * self.output_dim: 3 * self.output_dim]
            z3 = z[3 * self.output_dim:]

        i = self.inner_activation(z0)            # input gate
        f = self.inner_activation(z1)            # forget gate
        c = f * c_tm1 + i * self.activation(z2)  # new cell state
        o = self.inner_activation(z3)            # output gate
        h = o * self.activation(c)
        return h, c
#}}}
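The single matrix product in this step computes all four LSTM gate pre-activations at once: `z` has width `4 * output_dim` and is sliced into input-gate, forget-gate, cell-candidate, and output-gate blocks. A quick NumPy sketch of that packed layout (dimensions are illustrative):

import numpy

output_dim = 3
z = numpy.arange(4 * output_dim)            # stands in for x.W + h_tm1.U + b

z0 = z[:output_dim]                         # input gate block
z1 = z[output_dim:2 * output_dim]           # forget gate block
z2 = z[2 * output_dim:3 * output_dim]       # cell candidate block
z3 = z[3 * output_dim:]                     # output gate block
print(z0, z1, z2, z3)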
Example #4
Source File: test_stack.py    From spinn with MIT License
def setUp(self):
        if 'gpu' not in theano.config.device:
            raise RuntimeError("Thin stack only defined for GPU usage")

        self.embedding_dim = self.model_dim = 2
        self.vocab_size = 5
        self.seq_length = 5
        self.batch_size = 2
        self.num_classes = 2

        spec = util.ModelSpec(self.model_dim, self.embedding_dim,
                              self.batch_size, self.vocab_size,
                              self.seq_length)

        self.vs = vs = VariableStore()
        def compose_network(children, *args, **kwargs):
            c1, c2 = children
            W = vs.add_param("W", (self.model_dim * 2, self.model_dim))
            b = vs.add_param("b", (self.model_dim,),
                             initializer=util.ZeroInitializer())
            return T.dot(T.concatenate([c1, c2], axis=1), W) + b
Example #5
Source File: nn.py    From Att-ChemdNER with Apache License 2.0
def step(self, word, h_tm1, c_tm1, x):
#{{{
        H = x
        input_length = x.shape[0]
        C = T.repeat(c_tm1.reshape((1, -1)), input_length, axis=0)
        _HC = K.concatenate([H, C])
        energy = T.dot(_HC, self.W_A.reshape((-1, 1))) + self.b_A
        energy = K.softmax(energy.reshape((1, -1)))
        x = (H * energy.reshape((-1, 1))).sum(axis=0)

        # combine the glimpsed context with the word
        combine = K.concatenate([x, word])
        combined = K.dot(combine, self.W_combine) + self.b_combine
        # original LSTM step
        h_t, c_t = super(AttentionLSTM, self).step_noBatch(combined, h_tm1, c_tm1)
        return h_t, c_t
#}}}
Example #6
Source File: net.py    From Depth-Map-Prediction with GNU General Public License v3.0
def infer(self, x):
        (nfilt, fc, fi, fj) = self.filter_shape
        if (fi, fj) == (1, 1):  # special case: 1x1 kernels reduce to a matrix multiply
            W = self.W.reshape((nfilt, fc))
            (bsize, nc, ni, nj) = x.shape
            xvec = x.transpose((1,0,2,3)).reshape((nc, bsize*ni*nj))
            if self.transpose:
                y = T.dot(W.T, xvec)
                y = y.reshape((fc, bsize, ni, nj)).transpose((1,0,2,3))
            else:
                y = T.dot(W, xvec)
                y = y.reshape((nfilt, bsize, ni, nj)).transpose((1,0,2,3))
            y = thutil.gpu_contiguous(y)
        else:  # ordinary convolutional layer
            y = conv(x, self.W, border_mode=self.conv_mode,
                                transpose=self.transpose,
                                stride=self.stride)
        if self.have_bias:
            y += self.b.reshape((1, self.b.shape[0], 1, 1))
        return y

# fully connected layer
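The 1x1 branch above works because a 1x1 convolution is just a per-pixel matrix multiply over channels. A quick NumPy check of that equivalence (shapes are illustrative):

import numpy

rng = numpy.random.RandomState(0)
bsize, nc, ni, nj = 2, 3, 4, 5
nfilt = 6
x = rng.randn(bsize, nc, ni, nj)
W = rng.randn(nfilt, nc)                    # 1x1 kernels, squeezed to a matrix

# Reshape (batch, channels, H, W) -> (channels, batch*H*W), multiply, restore.
xvec = x.transpose(1, 0, 2, 3).reshape(nc, bsize * ni * nj)
y = numpy.dot(W, xvec).reshape(nfilt, bsize, ni, nj).transpose(1, 0, 2, 3)

# The same result computed directly with einsum, for comparison.
y_ref = numpy.einsum('fc,bcij->bfij', W, x)
print(numpy.allclose(y, y_ref))             # True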
Example #7
Source File: test_stack.py    From spinn with MIT License
def setUp(self):
        if 'gpu' not in theano.config.device:
            raise RuntimeError("Thin stack only defined for GPU usage")

        self.embedding_dim = self.model_dim = 2
        self.vocab_size = 5
        self.batch_size = 2
        self.num_classes = 2

        self.vs = VariableStore()

        def compose_network(children, hidden, *args, **kwargs):
            c1, c2 = children
            conc = T.concatenate([hidden, c1, c2], axis=1)

            W = self.vs.add_param("W", (self.model_dim // 2 + self.model_dim * 2, self.model_dim))
            b = self.vs.add_param("b", (self.model_dim,),
                                  initializer=util.ZeroInitializer())
            return T.dot(conc, W) + b
Example #8
Source File: test_stack.py    From spinn with MIT License
def setUp(self):
        if 'gpu' not in theano.config.device:
            raise RuntimeError("Thin stack only defined for GPU usage")

        self.embedding_dim = self.model_dim = 100
        self.vocab_size = 1000
        self.seq_length = 50
        self.batch_size = 256
        self.num_classes = 2

        spec = util.ModelSpec(self.model_dim, self.embedding_dim,
                              self.batch_size, self.vocab_size,
                              self.seq_length)

        self.vs = vs = VariableStore()
        def compose_network(children, *args, **kwargs):
            c1, c2 = children
            W1 = vs.add_param("W1", (self.model_dim, self.model_dim))
            W2 = vs.add_param("W2", (self.model_dim, self.model_dim))
            b = vs.add_param("b", (self.model_dim,),
                             initializer=util.ZeroInitializer())
            # TODO inplace add?
            return T.dot(c1, W1) + T.dot(c2, W2) + b
Example #9
Source File: blocks.py    From spinn with MIT License
def Linear(inp, inp_dim, outp_dim, vs, name="linear_layer", use_bias=True, initializer=None, bias_initializer=None):
    if isinstance(inp, tuple):
        assert isinstance(inp_dim, tuple)
        # Build initializers which are aware of the real shape of the overall
        # (unsplit) matrix.
        real_inp_dim = sum(inp_dim)
        initializer = partial(initializer or vs.default_initializer,
                              real_shape=(real_inp_dim, outp_dim))

        try:
            Ws = [vs.add_param("%s_W%i" % (name, i), (dim_i, outp_dim),
                               initializer=initializer)
                  for i, dim_i in enumerate(inp_dim)]
        except TypeError as e:
            raise RuntimeError(
                "TypeError in vs initialization for split Gemm. Does the "
                "initializer you provided (%s) support real_shape?"
                % initializer, e)

        outp = T.dot(inp[0], Ws[0])
        for inp_i, W_i in zip(inp[1:], Ws[1:]):
            # TODO inplace add?
            outp += T.dot(inp_i, W_i) 
Example #10
Source File: cbc_hb.py    From lifestyles with MIT License
def _create_observation_variable(individual_selections, choices, partsworth):
    """
    This function handles creating the PyMC3 observation variables. It also gracefully handles missing observations in individual selections.

    `individual_selections` is a Series of the selections an individual made, starting from 0. It can contain NaNs, which represent that no answer was provided.

    `choices` is a DataFrame with a hierarchical index: level=0 enumerates the choices, and level=1 displays the profile at a specific choice.
    Its size is (n_questions, n_choices_per_question).

    `partsworth` is a slice of a PyMC3 matrix. It represents the partsworth variables of an individual. Size is (n_profiles,).

    This computes the values exp(partsworth * profile_j) / sum_k[ exp(partsworth * profile_k) ] for all j.
    """
    nan_mask = pd.notnull(individual_selections)
    return pm.Categorical("Obs_%s" % individual_selections.name,
                          tt.nnet.softmax(tt.stack([
                            tt.dot(choice.values, partsworth) for _, choice in choices[nan_mask.values].groupby(axis=1, level=0)
                          ], axis=0).T),
                          observed=individual_selections[nan_mask.values].values) 
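The formula in the docstring is the standard multinomial-logit choice probability. A small NumPy sketch of exp(partsworth * profile_j) / sum_k exp(partsworth * profile_k), with made-up numbers:

import numpy

partsworth = numpy.array([0.5, -1.0, 2.0])   # one individual's partsworths
profiles = numpy.array([[1., 0., 1.],        # one question, three choices
                        [0., 1., 1.],
                        [1., 1., 0.]])

utilities = profiles.dot(partsworth)         # utility of each choice
probs = numpy.exp(utilities) / numpy.exp(utilities).sum()
print(probs, probs.sum())                    # choice probabilities sum to 1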
Example #11
Source File: layers.py    From 3D-R2N2 with MIT License
def set_output(self):
        padding = self._padding
        input_shape = self._input_shape
        padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                    input_shape[0],
                                    input_shape[1] + 2 * padding[1],
                                    input_shape[2],
                                    input_shape[3] + 2 * padding[3],
                                    input_shape[4] + 2 * padding[4])

        padded_input = tensor.set_subtensor(
            padded_input[:, padding[1]:padding[1] + input_shape[1], :,
                         padding[3]:padding[3] + input_shape[3],
                         padding[4]:padding[4] + input_shape[4]],
            self._prev_layer.output)

        fc_output = tensor.reshape(
            tensor.dot(self._fc_layer.output, self.Wx.val), self._output_shape)
        self._output = conv3d2d.conv3d(padded_input, self.Wh.val) + \
            fc_output + self.b.val.dimshuffle('x', 'x', 0, 'x', 'x') 
Example #12
Source File: nn.py    From opt-mmd with BSD 3-Clause "New" or "Revised" License
def get_output_for(self, input, init=False, deterministic=False, **kwargs):
        if input.ndim > 2:
            # if the input has more than two dimensions, flatten it into a
            # batch of feature vectors.
            input = input.flatten(2)

        activation = T.dot(input, self.W)

        if init:
            ma = T.mean(activation, axis=0)
            activation -= ma.dimshuffle('x',0)
            stdv = T.sqrt(T.mean(T.square(activation),axis=0))
            activation /= stdv.dimshuffle('x',0)
            self.init_updates = [(self.weight_scale, self.weight_scale/stdv), (self.b, -ma/stdv)]
        else:
            activation += self.b.dimshuffle('x', 0)

        return self.nonlinearity(activation) 
Example #13
Source File: mmd.py    From opt-mmd with BSD 3-Clause "New" or "Revised" License
def linear_mmd2_and_hotelling(X, Y, biased=True, reg=0):
    if not biased:
        raise ValueError("linear_mmd2_and_hotelling only works for biased est")

    n = X.shape[0]
    p = X.shape[1]
    Z = X - Y
    Z_bar = Z.mean(axis=0)
    mmd2 = Z_bar.dot(Z_bar)

    Z_cent = Z - Z_bar
    S = Z_cent.T.dot(Z_cent) / (n - 1)
    # z' inv(S) z = z' inv(L L') z = z' inv(L)' inv(L) z = ||inv(L) z||^2
    L = slinalg.cholesky(S + reg * T.eye(p))
    Linv_Z_bar = slinalg.solve_lower_triangular(L, Z_bar)
    lambda_ = n * Linv_Z_bar.dot(Linv_Z_bar)
    # happens on the CPU!
    return mmd2, lambda_ 
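The identity in the comment, z' inv(S) z = ||inv(L) z||^2 with S = L L', is easy to sanity-check in NumPy/SciPy. A minimal sketch with random data:

import numpy
from scipy import linalg

rng = numpy.random.RandomState(0)
p = 4
A = rng.randn(p, p)
S = A.dot(A.T) + numpy.eye(p)               # symmetric positive-definite
z = rng.randn(p)

direct = z.dot(numpy.linalg.solve(S, z))    # z' inv(S) z, computed directly
L = linalg.cholesky(S, lower=True)          # S = L L'
Linv_z = linalg.solve_triangular(L, z, lower=True)
print(numpy.allclose(direct, Linv_z.dot(Linv_z)))   # True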
Example #14
Source File: train_dcgan_utils.py    From iGAN with MIT License
def gen_batchnorm(_z, _params, n_layers=3, n_f=128, init_sz=4, nc=3):
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    h0_o = T.dot(_z, gw0)
    output = [h0_o]
    h0 = relu(batchnorm(h0_o, g=gg0, b=gb0))
    h1 = h0.reshape((h0.shape[0], n_f * 2 ** n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        h_o = deconv(hin, w, subsample=(2, 2), border_mode=(2, 2))
        hout = relu(batchnorm(h_o, g=g, b=b))
        hs.append(hout)
        output.append(h_o)

    if nc == 3:
        x = tanh(deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2)))
    if nc == 1:
        x = sigmoid(deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2)))

    return x, output 
Example #15
Source File: nn.py    From Att-ChemdNER with Apache License 2.0
def step(self, x, h_tm1, c_tm1):
#{{{
        z = T.dot(x, self.W) + T.dot(h_tm1, self.U) + self.b
        if self.with_batch:
            z_i = z[:, :self.output_dim]
            z_c = z[:, self.output_dim:2 * self.output_dim]
            z_o = z[:, 2 * self.output_dim:]
        else:
            z_i = z[:self.output_dim]
            z_c = z[self.output_dim:2 * self.output_dim]
            z_o = z[2 * self.output_dim:]

        i_t = self.inner_activation(z_i +
                                    T.dot(c_tm1, self.w_ci))
        # f_t = T.nnet.sigmoid(T.dot(x_t, self.w_xf) +
        #                      T.dot(h_tm1, self.w_hf) +
        #                      T.dot(c_tm1, self.w_cf) +
        #                      self.b_f)
        c_t = (1 - i_t) * c_tm1 + i_t * self.activation(z_c)
        o_t = self.inner_activation(z_o +
                                    T.dot(c_t, self.w_co))
        h_t = o_t * self.activation(c_t)
        return h_t, c_t
#}}}
Example #16
Source File: recurrent.py    From CAPTCHA-breaking with MIT License
def get_output(self, train=False):
        X = self.get_input(train)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        X = X.dimshuffle((1, 0, 2))

        x_z = T.dot(X, self.W_z) + self.b_z
        x_r = T.dot(X, self.W_r) + self.b_r
        x_h = T.dot(X, self.W_h) + self.b_h
        outputs, updates = theano.scan(
            self._step,
            sequences=[x_z, x_r, x_h, padded_mask],
            outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            non_sequences=[self.U_z, self.U_r, self.U_h],
            truncate_gradient=self.truncate_gradient
        )
        if self.return_sequences:
            return outputs.dimshuffle((1, 0, 2))
        return outputs[-1] 
Example #17
Source File: recurrent.py    From CAPTCHA-breaking with MIT License
def get_output(self, train=False):
        X = self.get_input(train)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        X = X.dimshuffle((1, 0, 2))

        x_z = T.dot(X, self.W_z) + self.b_z
        x_r = T.dot(X, self.Pmat) + self.b_r
        x_h = T.dot(X, self.W_h) + self.b_h
        outputs, updates = theano.scan(
            self._step,
            sequences=[x_z, x_r, x_h, padded_mask],
            outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            non_sequences=[self.U_z, self.U_r, self.U_h],
            truncate_gradient=self.truncate_gradient)
        if self.return_sequences:
            return outputs.dimshuffle((1, 0, 2))
        return outputs[-1] 
Example #18
Source File: recurrent.py    From CAPTCHA-breaking with MIT License
def get_output(self, train=False):
        X = self.get_input(train)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        X = X.dimshuffle((1, 0, 2))

        x_z = T.dot(X, self.W_z) + self.b_z
        x_r = T.dot(X, self.W_r) + self.b_r
        x_h = T.tanh(T.dot(X, self.Pmat)) + self.b_h
        outputs, updates = theano.scan(
            self._step,
            sequences=[x_z, x_r, x_h, padded_mask],
            outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            non_sequences=[self.U_r, self.U_h],
            truncate_gradient=self.truncate_gradient)
        if self.return_sequences:
            return outputs.dimshuffle((1, 0, 2))
        return outputs[-1] 
Example #19
Source File: recurrent.py    From CAPTCHA-breaking with MIT License
def get_output(self, train=False):
        X = self.get_input(train)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        X = X.dimshuffle((1, 0, 2))

        xi = T.dot(X, self.W_i) + self.b_i
        xf = T.dot(X, self.W_f) + self.b_f
        xc = T.dot(X, self.W_c) + self.b_c
        xo = T.dot(X, self.W_o) + self.b_o

        [outputs, memories], updates = theano.scan(
            self._step,
            sequences=[xi, xf, xo, xc, padded_mask],
            outputs_info=[
                T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
                T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
            ],
            non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c],
            truncate_gradient=self.truncate_gradient)

        if self.return_sequences:
            return outputs.dimshuffle((1, 0, 2))
        return outputs[-1] 
Example #20
Source File: recurrent.py    From CAPTCHA-breaking with MIT License
def get_output(self, train=False):
        X = self.get_input(train)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        X = X.dimshuffle((1, 0, 2))

        x_z = T.dot(X, self.W_z) + self.b_z
        x_r = T.dot(X, self.W_r) + self.b_r
        x_h = T.dot(X, self.W_h) + self.b_h
        outputs, updates = theano.scan(
            self._step,
            sequences=[x_z, x_r, x_h, padded_mask],
            outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            non_sequences=[self.U_z, self.U_r, self.U_h],
            truncate_gradient=self.truncate_gradient)

        if self.return_sequences:
            return outputs.dimshuffle((1, 0, 2))
        return outputs[-1] 
Example #21
Source File: recurrent.py    From CAPTCHA-breaking with MIT License 6 votes vote down vote up
def get_output(self, train=False):
        X = self.get_input(train)  # shape: (nb_samples, time (padded with zeros), input_dim)
        # new shape: (time, nb_samples, input_dim) -> because theano.scan iterates over main dimension
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        X = X.dimshuffle((1, 0, 2))
        x = T.dot(X, self.W) + self.b

        # scan = theano symbolic loop.
        # See: http://deeplearning.net/software/theano/library/scan.html
        # Iterate over the first dimension of the x array (=time).
        outputs, updates = theano.scan(
            self._step,  # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i])
            sequences=[x, dict(input=padded_mask, taps=[-1])],  # tensors to iterate over, inputs to _step
            # initialization of the output. Input to _step with default tap=-1.
            outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            non_sequences=self.U,  # static inputs to _step
            truncate_gradient=self.truncate_gradient)

        if self.return_sequences:
            return outputs.dimshuffle((1, 0, 2))
        return outputs[-1] 
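Several of these recurrent layers follow the same theano.scan pattern described in the comments above. Here is a minimal, self-contained sketch of that pattern with a toy recurrence (a running sum, not the layer above):

import numpy
import theano
import theano.tensor as T

X = T.matrix('X')                           # shape: (time, features)

def step(x_t, h_tm1):
    # Called once per timestep with the current input and the previous output.
    return h_tm1 + x_t

outputs, updates = theano.scan(
    step,
    sequences=X,                            # iterate over the first dimension
    outputs_info=T.zeros_like(X[0]))        # initial value of the recurrence

f = theano.function([X], outputs)
print(f(numpy.ones((4, 3), dtype=theano.config.floatX)))  # successive rows: 1, 2, 3, 4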
Example #22
Source File: 5_convolutional_net.py    From Theano-Tutorials with MIT License
def model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)

    l3a = rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)

    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)

    pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, l4, pyx 
Example #23
Source File: skipthoughts.py    From StackGAN with MIT License
def nn(model, text, vectors, query, k=5):
	"""
	Return the nearest neighbour sentences to query
	text: list of sentences
	vectors: the corresponding representations for text
	query: a string to search
	"""
	qf = encode(model, [query])
	qf /= norm(qf)
	scores = numpy.dot(qf, vectors.T).flatten()
	sorted_args = numpy.argsort(scores)[::-1]
	sentences = [text[a] for a in sorted_args[:k]]
	print('QUERY: ' + query)
	print('NEAREST: ')
	for i, s in enumerate(sentences):
		print(s, sorted_args[i])
Example #24
Source File: dcgan_theano.py    From iGAN with MIT License
def gen_test(_z, _params, _batchnorm, n_layers=3, n_f=128, init_sz=4, nc=3, use_tanh=False):
    if use_tanh:
        _z = tanh(_z)
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    u = _batchnorm[0]
    s = _batchnorm[n_layers + 1]
    h0 = relu(batchnorm(T.dot(T.clip(_z, -1.0, 1.0), gw0), u=u, s=s, g=gg0, b=gb0))
    h1 = h0.reshape((h0.shape[0], n_f * 2 ** n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        u = _batchnorm[n + 1]
        s = _batchnorm[n + n_layers + 2]
        hout = relu(batchnorm(deconv(hin, w, subsample=(2, 2), border_mode=(2, 2)), u=u, s=s, g=g, b=b))
        hs.append(hout)
    x = deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2))
    if nc == 3:
        x_f = tanh(x)
    if nc == 1:
        x_f = sigmoid(x)
    return x_f 
Example #25
Source File: train_dcgan_utils.py    From iGAN with MIT License
def gen_test(_z, _params, _bn, n_layers=3, n_f=128, init_sz=4):
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    u = _bn[0]
    s = _bn[n_layers + 1]
    h0 = relu(batchnorm(T.dot(T.clip(_z, -1.0, 1.0), gw0), u=u, s=s, g=gg0, b=gb0))
    h1 = h0.reshape((h0.shape[0], n_f * 2 ** n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        u = _bn[n + 1]
        s = _bn[n + n_layers + 2]
        hout = relu(batchnorm(deconv(hin, w, subsample=(2, 2), border_mode=(2, 2)), u=u, s=s, g=g, b=b))
        hs.append(hout)
    x = tanh(deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2)))
    return x 
Example #26
Source File: train_dcgan_utils.py    From iGAN with MIT License
def gen(_z, _params, n_layers=3, n_f=128, init_sz=4, nc=3):
    [gw0, gg0, gb0] = _params[0:3]
    hs = []
    h0 = relu(batchnorm(T.dot(_z, gw0), g=gg0, b=gb0))
    h1 = h0.reshape((h0.shape[0], n_f * 2 ** n_layers, init_sz, init_sz))
    hs.extend([h0, h1])
    for n in range(n_layers):
        [w, g, b] = _params[3 * (n + 1):3 * (n + 2)]
        hin = hs[-1]
        hout = relu(batchnorm(deconv(hin, w, subsample=(2, 2), border_mode=(2, 2)), g=g, b=b))
        hs.append(hout)
    x = deconv(hs[-1], _params[-1], subsample=(2, 2), border_mode=(2, 2))

    if nc == 3:
        x_f = tanh(x)
    if nc == 1:
        x_f = sigmoid(x)
    return x_f 
Example #27
Source File: 4_modern_net.py    From Theano-Tutorials with MIT License
def model(X, w_h, w_h2, w_o, p_drop_input, p_drop_hidden):
    X = dropout(X, p_drop_input)
    h = rectify(T.dot(X, w_h))

    h = dropout(h, p_drop_hidden)
    h2 = rectify(T.dot(h, w_h2))

    h2 = dropout(h2, p_drop_hidden)
    py_x = softmax(T.dot(h2, w_o))
    return h, h2, py_x 
Example #28
Source File: nn.py    From Att-ChemdNER with Apache License 2.0
def step(self, state, attended, source):
        #from theano.gradient import disconnected_grad
        #state = disconnected_grad(state_)
        #M_state = T.dot(self.W_A_h, state)

        _energy = self.scoreFun(attended, state, self.W_A)
        energy = T.nnet.softmax(_energy)
        #energy = self.softmaxReScale(_energy, 0.02)
        #energy = self.reScale(energy.flatten(), 0.02).reshape((1, -1))
        #energyIndex = energy.flatten().argmin(axis=-1)
        glimpsed = (energy.T * source).sum(axis=0)
        #glimpsed = source[energyIndex]
        return energy.flatten(), glimpsed
Example #29
Source File: nn.py    From Att-ChemdNER with Apache License 2.0
def CosineScore(self, attended, state, W):
#{{{
        dotProduct = T.dot(attended, state.T)
        Al2Norm = T.sqrt((attended ** 2).sum(axis=-1))
        Bl2Norm = T.sqrt((state ** 2).sum(axis=-1))
        M = dotProduct / (Al2Norm * Bl2Norm)   # cosine similarity
        _energy = T.exp(M + 2)                 # shifted exp keeps scores positive
        return _energy
#}}}
Example #30
Source File: 3_net.py    From Theano-Tutorials with MIT License
def model(X, w_h, w_o):
    h = T.nnet.sigmoid(T.dot(X, w_h))
    pyx = T.nnet.softmax(T.dot(h, w_o))
    return pyx