Python torch.nn.functional.tanh() Examples

The following are 30 code examples of torch.nn.functional.tanh(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module torch.nn.functional.
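Before the project examples, here is a minimal, self-contained sketch of the basic call (it is not taken from any of the projects below): torch.nn.functional.tanh applies the element-wise hyperbolic tangent, squashing values into (-1, 1). Note that recent PyTorch releases deprecate F.tanh in favor of torch.tanh or the nn.Tanh module, which is why some of the examples below use those spellings instead.

import torch
import torch.nn.functional as F

x = torch.randn(2, 3)          # any real-valued tensor
y = F.tanh(x)                  # element-wise tanh; output values lie in (-1, 1)

# Equivalent, non-deprecated alternatives:
y_fn = torch.tanh(x)
y_mod = torch.nn.Tanh()(x)
assert torch.allclose(y, y_fn) and torch.allclose(y, y_mod)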
Example #1
Source File: cppn.py    From pde-surrogate with MIT License
def __init__(self, dim_in, dim_out, dim_hidden, layers_hidden, act='tanh', 
        xavier_init=True):
        super(CPPN, self).__init__()

        self.add_module('fc0', nn.Linear(dim_in, dim_hidden, bias=None))
        self.add_module('act0', nn.Tanh())
        for i in range(1, layers_hidden):
            self.add_module('fc{}'.format(i), nn.Linear(dim_hidden, dim_hidden, bias=True))
            if act == 'tanh':
                self.add_module('act{}'.format(i), nn.Tanh())
            elif act == 'relu':
                self.add_module('act{}'.format(i), nn.ReLU())
            else:
                raise ValueError(f'unknown activation function: {act}')

        self.add_module('fc{}'.format(layers_hidden), nn.Linear(dim_hidden, dim_out))
        if xavier_init:
            self.init_xavier() 
Example #2
Source File: handcrafted_GRU.py    From PySyft with Apache License 2.0
def forward(self, x, h):

        x = x.view(-1, x.shape[1])

        i_r = self.fc_ir(x)
        h_r = self.fc_hr(h)
        i_z = self.fc_iz(x)
        h_z = self.fc_hz(h)
        i_n = self.fc_in(x)
        h_n = self.fc_hn(h)

        resetgate = F.sigmoid(i_r + h_r)
        inputgate = F.sigmoid(i_z + h_z)
        newgate = F.tanh(i_n + (resetgate * h_n))

        hy = newgate + inputgate * (h - newgate)

        return hy 
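The fc_* projections used in this forward pass are defined elsewhere in the source file. As a rough sketch only (the layer names come from the excerpt; the class name, sizes, and bias settings are assumptions, not the PySyft implementation), a constructor consistent with the code above could look like:

import torch.nn as nn

class GRUCellSketch(nn.Module):  # hypothetical name, not the PySyft class
    def __init__(self, input_size, hidden_size):
        super().__init__()
        # one input-to-hidden and one hidden-to-hidden projection per gate:
        # reset (r), update (z), and candidate/new state (n)
        self.fc_ir = nn.Linear(input_size, hidden_size)
        self.fc_hr = nn.Linear(hidden_size, hidden_size)
        self.fc_iz = nn.Linear(input_size, hidden_size)
        self.fc_hz = nn.Linear(hidden_size, hidden_size)
        self.fc_in = nn.Linear(input_size, hidden_size)
        self.fc_hn = nn.Linear(hidden_size, hidden_size)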
Example #3
Source File: model.py    From skip-thoughts with MIT License
def forward(self, sentences):
        # sentences = (batch_size, maxlen), with padding on the right.

        sentences = sentences.transpose(0, 1)  # (maxlen, batch_size)

        word_embeddings = F.tanh(self.word2embd(sentences))  # (maxlen, batch_size, word_size)

        # The following is a hack: We read embeddings in reverse. This is required to move padding to the left.
        # If reversing is not done, the RNN sees a lot of garbage values right before its final state.
        # This reversing also means that the words will be read in reverse. But this is not a big problem, since
        # several sequence-to-sequence models for Machine Translation use similar hacks.
        rev = self.reverse_variable(word_embeddings)

        _, (thoughts, _) = self.lstm(rev)
        thoughts = thoughts[-1]  # (batch, thought_size)

        return thoughts, word_embeddings 
Example #4
Source File: nnutils.py    From icml18-jtnn with MIT License
def GRU(x, h_nei, W_z, W_r, U_r, W_h):
    hidden_size = x.size()[-1]
    sum_h = h_nei.sum(dim=1)
    z_input = torch.cat([x,sum_h], dim=1)
    z = F.sigmoid(W_z(z_input))

    r_1 = W_r(x).view(-1,1,hidden_size)
    r_2 = U_r(h_nei)
    r = F.sigmoid(r_1 + r_2)
    
    gated_h = r * h_nei
    sum_gated_h = gated_h.sum(dim=1)
    h_input = torch.cat([x,sum_gated_h], dim=1)
    pre_h = F.tanh(W_h(h_input))
    new_h = (1.0 - z) * sum_h + z * pre_h
    return new_h 
Example #5
Source File: jtnn_enc.py    From icml18-jtnn with MIT License
def forward(self, h, x, mess_graph):
        mask = torch.ones(h.size(0), 1)
        mask[0] = 0 #first vector is padding
        mask = create_var(mask)
        for it in range(self.depth):
            h_nei = index_select_ND(h, 0, mess_graph)
            sum_h = h_nei.sum(dim=1)
            z_input = torch.cat([x, sum_h], dim=1)
            z = F.sigmoid(self.W_z(z_input))

            r_1 = self.W_r(x).view(-1, 1, self.hidden_size)
            r_2 = self.U_r(h_nei)
            r = F.sigmoid(r_1 + r_2)
            
            gated_h = r * h_nei
            sum_gated_h = gated_h.sum(dim=1)
            h_input = torch.cat([x, sum_gated_h], dim=1)
            pre_h = F.tanh(self.W_h(h_input))
            h = (1.0 - z) * sum_h + z * pre_h
            h = h * mask

        return h 
Example #6
Source File: models.py    From pytorch-ppo with MIT License
def forward(self, x, old=False):
        if old:
            x = F.tanh(self.module_list_old[0](x))
            x = F.tanh(self.module_list_old[1](x))

            action_mean = self.module_list_old[2](x)
            action_log_std = self.module_list_old[3].expand_as(action_mean)
            action_std = torch.exp(action_log_std)

            value = self.module_list_old[4](x)
        else:
            x = F.tanh(self.affine1(x))
            x = F.tanh(self.affine2(x))

            action_mean = self.action_mean(x)
            action_log_std = self.action_log_std.expand_as(action_mean)
            action_std = torch.exp(action_log_std)

            value = self.value_head(x)

        return action_mean, action_log_std, action_std, value 
Example #7
Source File: models.py    From pytorch-ppo with MIT License
def forward(self, x, old=False):
        if old:
            x = F.tanh(self.module_list_old[0](x))
            x = F.tanh(self.module_list_old[1](x))

            action_mean = self.module_list_old[2](x)
            action_log_std = self.module_list_old[3].expand_as(action_mean)
            action_std = torch.exp(action_log_std)
        else:
            x = F.tanh(self.affine1(x))
            x = F.tanh(self.affine2(x))

            action_mean = self.action_mean(x)
            action_log_std = self.action_log_std.expand_as(action_mean)
            action_std = torch.exp(action_log_std)

        return action_mean, action_log_std, action_std 
Example #8
Source File: rnn_cell.py    From translate with BSD 3-Clause "New" or "Revised" License
def forward(self, x, hidden):
        # get prev_t, cell_t from states
        hx, cx = hidden
        Wx = F.linear(x, self.weight_ih)
        Uz = F.linear(hx, self.weight_hh)

        # Section 2.1 in https://arxiv.org/pdf/1606.06630.pdf
        gates = self.alpha * Wx * Uz + self.beta_i * Wx + self.beta_h * Uz + self.bias

        # Same as LSTMCell after this point
        ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)

        ingate = F.sigmoid(ingate)
        forgetgate = F.sigmoid(forgetgate)
        cellgate = F.tanh(cellgate)
        outgate = F.sigmoid(outgate)

        cy = (forgetgate * cx) + (ingate * cellgate)
        hy = outgate * F.tanh(cy)

        return hy, cy 
Example #9
Source File: rnn_cell.py    From translate with BSD 3-Clause "New" or "Revised" License
def forward(self, x, hidden):
        hx, cx = hidden
        gates = F.linear(x, self.weight_ih, self.bias_ih) + F.linear(
            hx, self.weight_hh, self.bias_hh
        )

        ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)

        ingate = F.sigmoid(self._layerNormalization(ingate))
        forgetgate = F.sigmoid(self._layerNormalization(forgetgate))
        cellgate = F.tanh(self._layerNormalization(cellgate))
        outgate = F.sigmoid(self._layerNormalization(outgate))

        cy = (forgetgate * cx) + (ingate * cellgate)

        hy = outgate * F.tanh(cy)

        return hy, cy 
Example #10
Source File: tsd_net.py    From ConvLab with MIT License
def forward(self, z_enc_out, u_enc_out, u_input_np, m_t_input, degree_input, last_hidden, z_input_np):
        sparse_z_input = Variable(self.get_sparse_selective_input(z_input_np), requires_grad=False)

        m_embed = self.emb(m_t_input)
        z_context = self.attn_z(last_hidden, z_enc_out)
        u_context = self.attn_u(last_hidden, u_enc_out)
        gru_in = torch.cat([m_embed, u_context, z_context, degree_input.unsqueeze(0)], dim=2)
        gru_out, last_hidden = self.gru(gru_in, last_hidden)
        gen_score = self.proj(torch.cat([z_context, u_context, gru_out], 2)).squeeze(0)
        z_copy_score = F.tanh(self.proj_copy2(z_enc_out.transpose(0, 1)))
        z_copy_score = torch.matmul(z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)
        z_copy_score = z_copy_score.cpu()
        z_copy_score_max = torch.max(z_copy_score, dim=1, keepdim=True)[0]
        z_copy_score = torch.exp(z_copy_score - z_copy_score_max)  # [B,T]
        z_copy_score = torch.log(torch.bmm(z_copy_score.unsqueeze(1), sparse_z_input)).squeeze(
            1) + z_copy_score_max  # [B,V]
        z_copy_score = cuda_(z_copy_score)

        scores = F.softmax(torch.cat([gen_score, z_copy_score], dim=1), dim=1)
        gen_score, z_copy_score = scores[:, :cfg.vocab_size], \
                                  scores[:, cfg.vocab_size:]
        proba = gen_score + z_copy_score[:, :cfg.vocab_size]  # [B,V]
        proba = torch.cat([proba, z_copy_score[:, cfg.vocab_size:]], 1)
        return proba, last_hidden, gru_out 
Example #11
Source File: handcrafted_GRU.py    From PySyft with Apache License 2.0
def forward(self, x, h):

        x = x.view(-1, x.shape[1])

        i_r = self.fc_ir(x)
        h_r = self.fc_hr(h)
        i_z = self.fc_iz(x)
        h_z = self.fc_hz(h)
        i_n = self.fc_in(x)
        h_n = self.fc_hn(h)

        resetgate = F.sigmoid(i_r + h_r)
        inputgate = F.sigmoid(i_z + h_z)
        newgate = F.tanh(i_n + (resetgate * h_n))

        hy = newgate + inputgate * (h - newgate)

        return hy 
Example #12
Source File: layers.py    From MnemonicReader with BSD 3-Clause "New" or "Revised" License
def pointer(self, x, state, x_mask):
        x_ = torch.cat([x, state.unsqueeze(1).repeat(1,x.size(1),1)], 2)
        s0 = F.tanh(self.linear(x_))
        s = self.weights(s0).view(x.size(0), x.size(1))
        s.data.masked_fill_(x_mask.data, -float('inf'))
        a = F.softmax(s)
        res = a.unsqueeze(1).bmm(x).squeeze(1)
        if self.normalize:
            if self.training:
                # In training we output log-softmax for NLL
                scores = F.log_softmax(s)
            else:
                # ...Otherwise 0-1 probabilities
                scores = F.softmax(s)
        else:
            scores = a.exp()
        return res, scores 
Example #13
Source File: attention.py    From pytorch-seq2seq with Apache License 2.0
def forward(self, output, context):
        batch_size = output.size(0)
        hidden_size = output.size(2)
        input_size = context.size(1)
        # (batch, out_len, dim) * (batch, in_len, dim) -> (batch, out_len, in_len)
        attn = torch.bmm(output, context.transpose(1, 2))
        if self.mask is not None:
            attn.data.masked_fill_(self.mask, -float('inf'))
        attn = F.softmax(attn.view(-1, input_size), dim=1).view(batch_size, -1, input_size)

        # (batch, out_len, in_len) * (batch, in_len, dim) -> (batch, out_len, dim)
        mix = torch.bmm(attn, context)

        # concat -> (batch, out_len, 2*dim)
        combined = torch.cat((mix, output), dim=2)
        # output -> (batch, out_len, dim)
        output = F.tanh(self.linear_out(combined.view(-1, 2 * hidden_size))).view(batch_size, -1, hidden_size)

        return output, attn 
Example #14
Source File: stack_augmentation.py    From OpenChem with MIT License
def forward(self, input_val, prev_stack):
        batch_size = prev_stack.size(0)

        controls = self.stack_controls_layer(input_val.squeeze(0))
        controls = F.softmax(controls, dim=1)
        controls = controls.view(-1, 3, 1, 1)
        stack_input = self.stack_input_layer(input_val)
        stack_input = F.tanh(stack_input)
        stack_input = stack_input.permute(1, 0, 2)
        zeros_at_the_bottom = torch.zeros(batch_size, 1, self.stack_width)
        if self.use_cuda:
            zeros_at_the_bottom = torch.tensor(zeros_at_the_bottom.cuda(),
                                               requires_grad=True)
        else:
            zeros_at_the_bottom = torch.tensor(zeros_at_the_bottom,
                                               requires_grad=True)
        a_push, a_pop, a_no_op = controls[:, 0], controls[:, 1], controls[:, 2]
        stack_down = torch.cat((prev_stack[:, 1:], zeros_at_the_bottom), dim=1)
        stack_up = torch.cat((stack_input, prev_stack[:, :-1]), dim=1)
        new_stack = a_no_op * prev_stack + a_push * stack_up + \
                    a_pop * stack_down
        return new_stack 
Example #15
Source File: attention.py    From Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch with MIT License
def calc_score(self, att_query, att_keys):
        """
        att_query: b x t_q x n
        att_keys:  b x t_k x n
        returns scores of shape b x t_q x t_k
        """

        b, t_k, n = list(att_keys.size())
        t_q = att_query.size(1)
        if self.mode == 'bahdanau':
            att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n)
            att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n)
            sum_qk = att_query + att_keys
            sum_qk = sum_qk.view(b * t_k * t_q, n)
            out = self.linear_att(F.tanh(sum_qk)).view(b, t_q, t_k)
        elif self.mode == 'dot_prod':
            out = torch.bmm(att_query, att_keys.transpose(1, 2))
            if hasattr(self, 'scale'):
                out = out * self.scale
        return out 
Example #16
Source File: lstm_hard_sigmoid.py    From SemEval2019Task3 with MIT License
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    """
    A modified LSTM cell with hard sigmoid activation on the input, forget and output gates.
    """
    hx, cx = hidden
    gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)

    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)

    ingate = hard_sigmoid(ingate)
    forgetgate = hard_sigmoid(forgetgate)
    cellgate = F.tanh(cellgate)
    outgate = hard_sigmoid(outgate)

    cy = (forgetgate * cx) + (ingate * cellgate)
    hy = outgate * F.tanh(cy)

    return hy, cy 
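The hard_sigmoid helper used above is not part of the excerpt. A common definition is the piecewise-linear approximation below; it matches Keras' hard sigmoid and is offered here only as a plausible stand-in, not as the project's exact implementation.

import torch

def hard_sigmoid(x):
    # linear ramp 0.2 * x + 0.5, clamped to the range [0, 1]
    return torch.clamp(0.2 * x + 0.5, min=0.0, max=1.0)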
Example #17
Source File: dnn.py    From MNIST-baselines with MIT License
def forward(self, x):
        if self.activation == 'relu':
            if self.batch_norm:
                x = F.relu(self.bn1(self.fc1(x)))
                x = F.relu(self.bn2(self.fc2(x)))
                x = F.relu(self.bn3(self.fc3(x)))
            else:
                x = F.relu(self.fc1(x))
                x = F.relu(self.fc2(x))
                x = F.relu(self.fc3(x))

        elif self.activation == 'tanh':
            if self.batch_norm:
                x = F.tanh(self.bn1(self.fc1(x)))
                x = F.tanh(self.bn2(self.fc2(x)))
                x = F.tanh(self.bn3(self.fc3(x)))
            else:
                x = F.tanh(self.fc1(x))
                x = F.tanh(self.fc2(x))
                x = F.tanh(self.fc3(x))

        return self.fc4(x) 
Example #18
Source File: pairwise.py    From pykg2vec with MIT License
def train_layer(self, h, t):
        """ Defines the forward pass training layers of the algorithm.

            Args:
               h (Tensor): Head entity ids.
               t (Tensor): Tail entity ids of the triple.
        """
        
        mr1h = torch.matmul(h, self.mr1.weight) # h => [m, self.ent_hidden_size], self.mr1 => [self.ent_hidden_size, self.rel_hidden_size]
        mr2t = torch.matmul(t, self.mr2.weight) # t => [m, self.ent_hidden_size], self.mr2 => [self.ent_hidden_size, self.rel_hidden_size]

        expanded_h = h.unsqueeze(dim=0).repeat(self.rel_hidden_size, 1, 1) # [self.rel_hidden_size, m, self.ent_hidden_size]
        expanded_t = t.unsqueeze(dim=-1) # [m, self.ent_hidden_size, 1]

        temp = (torch.matmul(expanded_h, self.mr.weight.view(self.rel_hidden_size, self.ent_hidden_size, self.ent_hidden_size))).permute(1, 0, 2) # [m, self.rel_hidden_size, self.ent_hidden_size]
        htmrt = torch.squeeze(torch.matmul(temp, expanded_t), dim=-1) # [m, self.rel_hidden_size]

        return F.tanh(htmrt + mr1h + mr2t + self.br.weight) 
Example #19
Source File: model_resnet.py    From BigGAN-pytorch with Apache License 2.0
def forward(self, input, class_id):
        codes = torch.split(input, 20, 1)
        class_emb = self.linear(class_id)  # 128

        out = self.G_linear(codes[0])
        # out = out.view(-1, 1536, 4, 4)
        out = out.view(-1, self.first_view, 4, 4)
        ids = 1
        for i, conv in enumerate(self.conv):
            if isinstance(conv, GBlock):
                
                conv_code = codes[ids]
                ids = ids+1
                condition = torch.cat([conv_code, class_emb], 1)
                # print('condition',condition.size()) #torch.Size([4, 148])
                out = conv(out, condition)

            else:
                out = conv(out)

        out = self.ScaledCrossReplicaBN(out)
        out = F.relu(out)
        out = self.colorize(out)

        return F.tanh(out) 
Example #20
Source File: BinaryTreeBasedModule.py    From latent-treelstm with MIT License
def _transform_leafs(self, x, mask):
        if self.leaf_transformation == BinaryTreeBasedModule.no_transformation:
            pass
        elif self.leaf_transformation == BinaryTreeBasedModule.lstm_transformation:
            x = self.lstm(x, mask)
        elif self.leaf_transformation == BinaryTreeBasedModule.bi_lstm_transformation:
            h_f = self.lstm_f(x, mask)
            h_b = self.lstm_b(x, mask, backward=True)
            x = torch.cat([h_f, h_b], dim=-1)
        elif self.leaf_transformation == BinaryTreeBasedModule.conv_transformation:
            x = x.permute(0, 2, 1)
            x = self.conv1(x)
            x = F.relu(x)
            x = self.conv2(x)
            x = F.tanh(x)
            x = x.permute(0, 2, 1)
        # tanh is applied to make sure that leaves and other nodes are in the same range
        return self.linear(x).tanh().chunk(chunks=2, dim=-1) 
Example #21
Source File: lstm.py    From crosentgec with GNU General Public License v3.0
def forward(self, input, source_hids, encoder_padding_mask):
        # input: bsz x input_embed_dim
        # source_hids: srclen x bsz x output_embed_dim

        # x: bsz x output_embed_dim
        x = self.input_proj(input)

        # compute attention
        attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2)

        # don't attend over padding
        if encoder_padding_mask is not None:
            attn_scores = attn_scores.float().masked_fill_(
                encoder_padding_mask,
                float('-inf')
            ).type_as(attn_scores)  # FP16 support: cast to float and back

        attn_scores = F.softmax(attn_scores, dim=0)  # srclen x bsz

        # sum weighted sources
        x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)

        x = F.tanh(self.output_proj(torch.cat((x, input), dim=1)))
        return x, attn_scores 
Example #22
Source File: model_resnet.py    From sagan-pytorch with Apache License 2.0
def forward(self, input, class_id):
        out = self.lin_code(input)
        out = out.view(-1, 512, 4, 4)

        for conv in self.conv:
            if isinstance(conv, ConvBlock):
                out = conv(out, class_id)

            else:
                out = conv(out)

        out = self.bn(out)
        out = F.relu(out)
        out = self.colorize(out)

        return F.tanh(out) 
Example #23
Source File: seq2seq_atten.py    From video_captioning_rl with MIT License
def forward(self, dec_state, enc_states, mask, dag=None):
        """
        :param dec_state: 
            decoder hidden state of size batch_size x dec_dim
        :param enc_states:
            all encoder hidden states of size batch_size x max_enc_steps x enc_dim
        :param mask:
            binary mask over encoder time steps of size batch_size x max_enc_steps
        """
        dec_contrib = self.decoder_in(dec_state)
        batch_size, max_enc_steps, _  = enc_states.size()
        enc_contrib = self.encoder_in(enc_states.contiguous().view(-1, self.enc_dim)).contiguous().view(batch_size, max_enc_steps, self.attn_dim)
        pre_attn = F.tanh(enc_contrib + dec_contrib.unsqueeze(1).expand_as(enc_contrib))
       
        
        energy = self.attn_linear(pre_attn.view(-1, self.attn_dim)).view(batch_size, max_enc_steps)
        alpha = F.softmax(energy, 1)
        # mask alpha and renormalize it
        alpha = alpha * mask
        alpha = torch.div(alpha, alpha.sum(1).unsqueeze(1).expand_as(alpha))

        context_vector = torch.bmm(alpha.unsqueeze(1), enc_states).squeeze(1) # (batch_size, enc_dim)

        return context_vector, alpha 
Example #24
Source File: model.py    From visDial.pytorch with MIT License
def forward(self, input_feat, idx, hidden, vocab_size):

        output, _ = self.rnn(input_feat, hidden)
        mask = idx.data.eq(0)  # generate the mask
        mask[idx.data == vocab_size] = 1 # also set the last token to be 1
        if isinstance(input_feat, Variable):
            mask = Variable(mask, volatile=input_feat.volatile)

        # Doing self attention here.
        atten = self.W2(F.dropout(F.tanh(self.W1(output.view(-1, self.nhid))), self.d, training=self.training)).view(idx.size())
        atten.masked_fill_(mask, -99999)
        weight = F.softmax(atten.t()).view(-1,1,idx.size(0))
        feat = torch.bmm(weight, output.transpose(0,1)).view(-1,self.nhid)
        feat = F.dropout(feat, self.d, training=self.training)
        transform_output = F.tanh(self.fc(feat))

        return transform_output 
Example #25
Source File: gat_layers.py    From DeepInf with MIT License
def forward(self, h, adj):
        bs, n = h.size()[:2] # h is of size bs x n x f_in
        h_prime = torch.matmul(h.unsqueeze(1), self.w) # bs x n_head x n x f_out
        attn_src = torch.matmul(F.tanh(h_prime), self.a_src) # bs x n_head x n x 1
        attn_dst = torch.matmul(F.tanh(h_prime), self.a_dst) # bs x n_head x n x 1
        attn = attn_src.expand(-1, -1, -1, n) + attn_dst.expand(-1, -1, -1, n).permute(0, 1, 3, 2) # bs x n_head x n x n

        attn = self.leaky_relu(attn)
        mask = 1 - adj.unsqueeze(1) # bs x 1 x n x n
        attn.data.masked_fill_(mask, float("-inf"))
        attn = self.softmax(attn) # bs x n_head x n x n
        attn = self.dropout(attn)
        output = torch.matmul(attn, h_prime) # bs x n_head x n x f_out
        if self.bias is not None:
            return output + self.bias
        else:
            return output 
Example #26
Source File: encoder_QI.py    From visDial.pytorch with MIT License
def forward(self, ques_emb, img_raw, ques_hidden, rnd):

        img_emb = F.tanh(self.img_embed(img_raw))

        ques_feat, ques_hidden = self.ques_rnn(ques_emb, ques_hidden)
        #ques_feat = F.dropout(ques_feat[-1], self.d, training=self.training)
        ques_feat = ques_feat[-1]

        ques_emb_2 = self.Wq_2(ques_feat).view(-1, 1, self.nhid)
        img_emb_2 = self.Wi_2(img_emb).view(-1, 49, self.nhid)

        atten_emb_2 = F.tanh(img_emb_2 + ques_emb_2.expand_as(img_emb_2))

        img_atten_weight = F.softmax(self.Wa_2(F.dropout(atten_emb_2, self.d, training=self.training
                                                ).view(-1, self.nhid)).view(-1, 49))

        img_attn_feat = torch.bmm(img_atten_weight.view(-1, 1, 49),
                                        img_emb.view(-1, 49, self.nhid))

        concat_feat = F.dropout(torch.cat((img_attn_feat.view(-1, self.nhid), ques_feat), 1), self.d, training=self.training)
        encoder_feat = F.tanh(self.fc1(concat_feat))


        return encoder_feat, ques_hidden 
Example #27
Source File: pairwise.py    From pykg2vec with MIT License
def layer(self, h, t):
        """Defines the forward pass layer of the algorithm.

          Args:
              h (Tensor): Head entity ids.
              t (Tensor): Tail entity ids of the triple.
        """       
        mr1h = torch.matmul(h, self.mr1.weight) # h => [m, d], self.mr1 => [d, k]
        mr2t = torch.matmul(t, self.mr2.weight) # t => [m, d], self.mr2 => [d, k]
        return torch.tanh(mr1h + mr2t) 
Example #28
Source File: cppn.py    From pde-surrogate with MIT License
def __init__(self, dim_in, dim_out, dim_hidden, res_layers, act='tanh'):
        super().__init__()
        self.add_module('fc0', nn.Linear(dim_in, dim_hidden, bias=None))

        for i in range(res_layers):
            reslayer = _ResLayer(dim_hidden, dim_hidden, dim_hidden, act=act)
            self.add_module(f'reslayer{i+1}', reslayer)
        
        self.add_module('act_last', activation(act))
        self.add_module('fc_last', nn.Linear(dim_hidden, dim_out, bias=True)) 
Example #29
Source File: networks.py    From leap with MIT License
def forward(self, obs):
        h = F.relu(self.fc1(obs))
        h = F.relu(self.fc2(h))
        return F.tanh(self.last_fc(h)) 
Example #30
Source File: networks.py    From leap with MIT License
def __init__(self, *args, **kwargs):
        self.save_init_params(locals())
        super().__init__(*args, output_activation=torch.tanh, **kwargs)