Python torch.nn.Tanh() Examples

The following are 30 code examples of torch.nn.Tanh(), collected from open-source projects. Each example notes its original project and source file above the code. You may also want to check out all the other available functions and classes of the torch.nn module.
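As a quick refresher before the examples: nn.Tanh() is a stateless module that applies the element-wise hyperbolic tangent, squashing its input to the range (-1, 1), which is why it so often appears as the final activation of GAN generators and autoencoder decoders below. A minimal sketch of using it on its own (the variable names here are illustrative, not taken from any of the projects):

import torch
import torch.nn as nn

activation = nn.Tanh()          # stateless module: construct once, reuse anywhere
x = torch.randn(2, 3)
y = activation(x)               # element-wise tanh, output values in (-1, 1)

# Equivalent functional form
assert torch.allclose(y, torch.tanh(x))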
Example #1
Source File: decoder.py    From DDPAE-video-prediction with MIT License
def __init__(self, input_size, n_channels, ngf, n_layers, activation='tanh'):
    super(ImageDecoder, self).__init__()

    ngf = ngf * (2 ** (n_layers - 2))
    layers = [nn.ConvTranspose2d(input_size, ngf, 4, 1, 0, bias=False),
              nn.BatchNorm2d(ngf),
              nn.ReLU(True)]

    for i in range(1, n_layers - 1):
      layers += [nn.ConvTranspose2d(ngf, ngf // 2, 4, 2, 1, bias=False),
                 nn.BatchNorm2d(ngf // 2),
                 nn.ReLU(True)]
      ngf = ngf // 2

    layers += [nn.ConvTranspose2d(ngf, n_channels, 4, 2, 1, bias=False)]
    if activation == 'tanh':
      layers += [nn.Tanh()]
    elif activation == 'sigmoid':
      layers += [nn.Sigmoid()]
    else:
      raise NotImplementedError

    self.main = nn.Sequential(*layers) 
Example #2
Source File: main_pytorch.py    From deep_architect with MIT License
def nonlinearity(h_nonlin_name):

    def Nonlinearity(nonlin_name):
        if nonlin_name == 'relu':
            m = nn.ReLU()
        elif nonlin_name == 'tanh':
            m = nn.Tanh()
        elif nonlin_name == 'elu':
            m = nn.ELU()
        else:
            raise ValueError

        return m

    return hpt.siso_pytorch_module_from_pytorch_layer_fn(
        Nonlinearity, {'nonlin_name': h_nonlin_name}) 
Example #3
Source File: aggregator_predict.py    From SQLNet with BSD 3-Clause "New" or "Revised" License
def __init__(self, N_word, N_h, N_depth, use_ca):
        super(AggPredictor, self).__init__()
        self.use_ca = use_ca

        self.agg_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h // 2,
                num_layers=N_depth, batch_first=True,
                dropout=0.3, bidirectional=True)
        if use_ca:
            print("Using column attention on aggregator predicting")
            self.agg_col_name_enc = nn.LSTM(input_size=N_word,
                    hidden_size=N_h // 2, num_layers=N_depth,
                    batch_first=True, dropout=0.3, bidirectional=True)
            self.agg_att = nn.Linear(N_h, N_h)
        else:
            print("Not using column attention on aggregator predicting")
            self.agg_att = nn.Linear(N_h, 1)
        self.agg_out = nn.Sequential(nn.Linear(N_h, N_h),
                nn.Tanh(), nn.Linear(N_h, 6))
        self.softmax = nn.Softmax()  # note: nn.Softmax() without an explicit dim is deprecated in recent PyTorch 
Example #4
Source File: dcgan_generator.py    From Pytorch-Project-Template with MIT License
def __init__(self, config):
        super().__init__()
        self.config = config

        self.relu = nn.ReLU(inplace=True)

        self.deconv1 = nn.ConvTranspose2d(in_channels=self.config.g_input_size, out_channels=self.config.num_filt_g * 8, kernel_size=4, stride=1, padding=0, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(self.config.num_filt_g*8)

        self.deconv2 = nn.ConvTranspose2d(in_channels=self.config.num_filt_g * 8, out_channels=self.config.num_filt_g * 4, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm2 = nn.BatchNorm2d(self.config.num_filt_g*4)

        self.deconv3 = nn.ConvTranspose2d(in_channels=self.config.num_filt_g * 4, out_channels=self.config.num_filt_g * 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm3 = nn.BatchNorm2d(self.config.num_filt_g*2)

        self.deconv4 = nn.ConvTranspose2d(in_channels=self.config.num_filt_g * 2, out_channels=self.config.num_filt_g , kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm4 = nn.BatchNorm2d(self.config.num_filt_g)

        self.deconv5 = nn.ConvTranspose2d(in_channels=self.config.num_filt_g, out_channels=self.config.input_channels, kernel_size=4, stride=2, padding=1, bias=False)

        self.out = nn.Tanh()

        self.apply(weights_init) 
Example #5
Source File: aggregator_predict.py    From SQL_Database_Optimization with BSD 3-Clause "New" or "Revised" License
def __init__(self, N_word, N_h, N_depth, use_ca):
        super(AggPredictor, self).__init__()
        self.use_ca = use_ca

        self.agg_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h // 2,
                num_layers=N_depth, batch_first=True,
                dropout=0.3, bidirectional=True)
        if use_ca:
            print("Using column attention on aggregator predicting")
            self.agg_col_name_enc = nn.LSTM(input_size=N_word,
                    hidden_size=N_h // 2, num_layers=N_depth,
                    batch_first=True, dropout=0.3, bidirectional=True)
            self.agg_att = nn.Linear(N_h, N_h)
        else:
            print("Not using column attention on aggregator predicting")
            self.agg_att = nn.Linear(N_h, 1)
        self.agg_out = nn.Sequential(nn.Linear(N_h, N_h),
                nn.Tanh(), nn.Linear(N_h, 6))
        self.softmax = nn.Softmax()  # note: nn.Softmax() without an explicit dim is deprecated in recent PyTorch 
Example #6
Source File: set2set.py    From LanczosNetwork with MIT License
def __init__(self, hidden_dim):
    """ Implementation of customized LSTM for set2set """
    super(Set2SetLSTM, self).__init__()
    self.hidden_dim = hidden_dim
    self.forget_gate = nn.Sequential(
        *[nn.Linear(2 * self.hidden_dim, self.hidden_dim),
          nn.Sigmoid()])
    self.input_gate = nn.Sequential(
        *[nn.Linear(2 * self.hidden_dim, self.hidden_dim),
          nn.Sigmoid()])
    self.output_gate = nn.Sequential(
        *[nn.Linear(2 * self.hidden_dim, self.hidden_dim),
          nn.Sigmoid()])
    self.memory_gate = nn.Sequential(
        *[nn.Linear(2 * self.hidden_dim, self.hidden_dim),
          nn.Tanh()])

    self._init_param() 
Example #7
Source File: flows.py    From pytorch-flows with MIT License
def __init__(self,
                 num_inputs,
                 num_hidden,
                 num_cond_inputs=None,
                 act='relu',
                 pre_exp_tanh=False):
        super(MADE, self).__init__()

        activations = {'relu': nn.ReLU, 'sigmoid': nn.Sigmoid, 'tanh': nn.Tanh}
        act_func = activations[act]

        input_mask = get_mask(
            num_inputs, num_hidden, num_inputs, mask_type='input')
        hidden_mask = get_mask(num_hidden, num_hidden, num_inputs)
        output_mask = get_mask(
            num_hidden, num_inputs * 2, num_inputs, mask_type='output')

        # MaskedLinear is a custom masked layer defined earlier in flows.py and
        # attached to the nn namespace there; it is not part of torch.nn itself.
        self.joiner = nn.MaskedLinear(num_inputs, num_hidden, input_mask,
                                      num_cond_inputs)

        self.trunk = nn.Sequential(act_func(),
                                   nn.MaskedLinear(num_hidden, num_hidden,
                                                   hidden_mask), act_func(),
                                   nn.MaskedLinear(num_hidden, num_inputs * 2,
                                                   output_mask)) 
Example #8
Source File: generators.py    From cycleGAN-PyTorch with MIT License
def __init__(self, input_nc=3, output_nc=3, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=True, num_blocks=6):
        super(ResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        res_model = [nn.ReflectionPad2d(3),
                    conv_norm_relu(input_nc, ngf * 1, 7, norm_layer=norm_layer, bias=use_bias),
                    conv_norm_relu(ngf * 1, ngf * 2, 3, 2, 1, norm_layer=norm_layer, bias=use_bias),
                    conv_norm_relu(ngf * 2, ngf * 4, 3, 2, 1, norm_layer=norm_layer, bias=use_bias)]

        for i in range(num_blocks):
            res_model += [ResidualBlock(ngf * 4, norm_layer, use_dropout, use_bias)]

        res_model += [dconv_norm_relu(ngf * 4, ngf * 2, 3, 2, 1, 1, norm_layer=norm_layer, bias=use_bias),
                      dconv_norm_relu(ngf * 2, ngf * 1, 3, 2, 1, 1, norm_layer=norm_layer, bias=use_bias),
                      nn.ReflectionPad2d(3),
                      nn.Conv2d(ngf, output_nc, 7),
                      nn.Tanh()]
        self.res_model = nn.Sequential(*res_model) 
Example #9
Source File: classifier.py    From ConvLab with MIT License
def __init__(self, input_dropout_p, rnn_cell, input_size, hidden_size, num_layers, output_dropout_p, bidirectional, variable_lengths):
        super(EncoderGRUATTN, self).__init__(input_dropout_p=input_dropout_p, 
                                             rnn_cell=rnn_cell, 
                                             input_size=input_size, 
                                             hidden_size=hidden_size, 
                                             num_layers=num_layers, 
                                             output_dropout_p=output_dropout_p, 
                                             bidirectional=bidirectional)
        self.variable_lengths = variable_lengths
        self.nhid_attn = hidden_size
        self.output_size = hidden_size*2 if bidirectional else hidden_size

        # attention to combine selection hidden states
        self.attn = nn.Sequential(
            nn.Linear(2 * hidden_size, hidden_size), 
            nn.Tanh(), 
            nn.Linear(hidden_size, 1)
        ) 
Example #10
Source File: model.py    From StackGAN-Pytorch with MIT License
def define_module(self):
        ninput = self.z_dim + self.ef_dim
        ngf = self.gf_dim
        # TEXT.DIMENSION -> GAN.CONDITION_DIM
        self.ca_net = CA_NET()

        # -> ngf x 4 x 4
        self.fc = nn.Sequential(
            nn.Linear(ninput, ngf * 4 * 4, bias=False),
            nn.BatchNorm1d(ngf * 4 * 4),
            nn.ReLU(True))

        # ngf x 4 x 4 -> ngf/2 x 8 x 8
        self.upsample1 = upBlock(ngf, ngf // 2)
        # -> ngf/4 x 16 x 16
        self.upsample2 = upBlock(ngf // 2, ngf // 4)
        # -> ngf/8 x 32 x 32
        self.upsample3 = upBlock(ngf // 4, ngf // 8)
        # -> ngf/16 x 64 x 64
        self.upsample4 = upBlock(ngf // 8, ngf // 16)
        # -> 3 x 64 x 64
        self.img = nn.Sequential(
            conv3x3(ngf // 16, 3),
            nn.Tanh()) 
Example #11
Source File: model.py    From ggnn.pytorch with MIT License
def __init__(self, state_dim, n_node, n_edge_types):
        super(Propogator, self).__init__()

        self.n_node = n_node
        self.n_edge_types = n_edge_types

        self.reset_gate = nn.Sequential(
            nn.Linear(state_dim*3, state_dim),
            nn.Sigmoid()
        )
        self.update_gate = nn.Sequential(
            nn.Linear(state_dim*3, state_dim),
            nn.Sigmoid()
        )
        self.tansform = nn.Sequential(
            nn.Linear(state_dim*3, state_dim),
            nn.Tanh()
        ) 
Example #12
Source File: dipvae_utils.py    From AIX360 with Apache License 2.0
def __init__(self, num_nodes=50, ip_dim=1, op_dim=1, activation_type='relu', args=None):
        super(FCNet, self).__init__()
        self.args = args
        if activation_type == 'relu':
            self.activation = nn.ReLU()
        elif activation_type == 'tanh':
            self.activation = nn.Tanh()
        else:
            print("Activation Type not supported")
            return
        layer = Linear
        self.fc_hidden = []
        self.fc1 = layer(ip_dim, num_nodes)
        self.bn1 = nn.BatchNorm1d(num_nodes)
        for _ in np.arange(self.args.num_layers - 1):
            self.fc_hidden.append(layer(num_nodes, num_nodes))
            self.fc_hidden.append(nn.BatchNorm1d(num_nodes))
            self.fc_hidden.append(self.activation)
        self.features = nn.Sequential(*self.fc_hidden)
        self.fc_out = layer(num_nodes, op_dim) 
Example #13
Source File: MyNet.py    From sgd-influence with MIT License
def __init__(self, device, m=[24, 12]):
        super(MnistAE, self).__init__()
        self.m = m
        self.encoder = nn.Sequential(
            nn.Conv2d(1, self.m[0], 3, stride=1, padding=0),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2, padding=0),
            nn.Conv2d(self.m[0], self.m[1], 3, stride=1, padding=0),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=1, padding=0)
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(self.m[1], self.m[1], 5, stride=2, padding=0),
            nn.ReLU(True),
            nn.ConvTranspose2d(self.m[1], self.m[0], 4, stride=1, padding=0),
            nn.ReLU(True),
            nn.ConvTranspose2d(self.m[0], 1, 3, stride=1, padding=0),
            nn.Tanh()
        ) 
Example #14
Source File: MyNet.py    From sgd-influence with MIT License
def __init__(self, device, m=[64, 32, 16]):
        super(CifarAE, self).__init__()
        self.m = m
        self.mm = np.array((0.4914, 0.4822, 0.4465))[np.newaxis, :, np.newaxis, np.newaxis]
        self.ss = np.array((0.2023, 0.1994, 0.2010))[np.newaxis, :, np.newaxis, np.newaxis]
        self.mm = torch.from_numpy(self.mm).float().to(device)
        self.ss = torch.from_numpy(self.ss).float().to(device)
        self.encoder = nn.Sequential(
            nn.Conv2d(3, self.m[0], 3, stride=1, padding=0),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2, padding=0),
            nn.Conv2d(self.m[0], self.m[1], 3, stride=1, padding=0),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=1, padding=0)
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(self.m[1], self.m[1], 5, stride=2, padding=0),
            nn.ReLU(True),
            nn.ConvTranspose2d(self.m[1], self.m[0], 4, stride=1, padding=0),
            nn.ReLU(True),
            nn.ConvTranspose2d(self.m[0], 3, 3, stride=1, padding=0),
            nn.Tanh()
        ) 
Example #15
Source File: GlobalAttention.py    From video-caption-openNMT.pytorch with MIT License
def __init__(self, dim, coverage=False, attn_type="dot"):
        super(GlobalAttention, self).__init__()

        self.dim = dim
        self.attn_type = attn_type
        assert (self.attn_type in ["dot", "general", "mlp"]), (
                "Please select a valid attention type.")

        if self.attn_type == "general":
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif self.attn_type == "mlp":
            self.linear_context = nn.Linear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = nn.Linear(dim, 1, bias=False)
        # mlp wants it with bias
        out_bias = self.attn_type == "mlp"
        self.linear_out = nn.Linear(dim*2, dim, bias=out_bias)

        self.sm = nn.Softmax(dim=-1)
        self.tanh = nn.Tanh()

        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False) 
Example #16
Source File: grassmodel.py    From grass_pytorch with Apache License 2.0
def __init__(self, feature_size, hidden_size):
        super(SampleDecoder, self).__init__()
        self.mlp1 = nn.Linear(feature_size, hidden_size)
        self.mlp2 = nn.Linear(hidden_size, feature_size)
        self.tanh = nn.Tanh() 
Example #17
Source File: grassmodel.py    From grass_pytorch with Apache License 2.0
def __init__(self, feature_size, hidden_size):
        super(NodeClassifier, self).__init__()
        self.mlp1 = nn.Linear(feature_size, hidden_size)
        self.tanh = nn.Tanh()
        self.mlp2 = nn.Linear(hidden_size, 3)
        #self.softmax = nn.Softmax() 
Example #18
Source File: grassmodel.py    From grass_pytorch with Apache License 2.0
def __init__(self, feature_size, symmetry_size, hidden_size):
        super(SymDecoder, self).__init__()
        self.mlp = nn.Linear(feature_size, hidden_size) # layer for decoding a feature vector 
        self.tanh = nn.Tanh()
        self.mlp_sg = nn.Linear(hidden_size, feature_size) # layer for outputing the feature of symmetry generator
        self.mlp_sp = nn.Linear(hidden_size, symmetry_size) # layer for outputing the vector of symmetry parameter 
Example #19
Source File: grassmodel.py    From grass_pytorch with Apache License 2.0
def __init__(self, feature_size, hidden_size):
        super(AdjEncoder, self).__init__()
        self.left = nn.Linear(feature_size, hidden_size)
        self.right = nn.Linear(feature_size, hidden_size, bias=False)
        self.second = nn.Linear(hidden_size, feature_size)
        self.tanh = nn.Tanh() 
Example #20
Source File: grassmodel.py    From grass_pytorch with Apache License 2.0
def __init__(self, input_size, feature_size):
        super(BoxEncoder, self).__init__()
        self.encoder = nn.Linear(input_size, feature_size)
        self.tanh = nn.Tanh() 
Example #21
Source File: grassmodel.py    From grass_pytorch with Apache License 2.0
def __init__(self, feature_size, hidden_size):
        super(Sampler, self).__init__()
        self.mlp1 = nn.Linear(feature_size, hidden_size)
        self.mlp2mu = nn.Linear(hidden_size, feature_size)
        self.mlp2var = nn.Linear(hidden_size, feature_size)
        self.tanh = nn.Tanh() 
Example #22
Source File: grassmodel.py    From grass_pytorch with Apache License 2.0
def __init__(self, feature_size, symmetry_size, hidden_size):
        super(SymEncoder, self).__init__()
        self.left = nn.Linear(feature_size, hidden_size)
        self.right = nn.Linear(symmetry_size, hidden_size)
        self.second = nn.Linear(hidden_size, feature_size)
        self.tanh = nn.Tanh() 
Example #23
Source File: grassmodel.py    From grass_pytorch with Apache License 2.0
def __init__(self, feature_size, box_size):
        super(BoxDecoder, self).__init__()
        self.mlp = nn.Linear(feature_size, box_size)
        self.tanh = nn.Tanh() 
Example #24
Source File: grassmodel.py    From grass_pytorch with Apache License 2.0
def __init__(self, feature_size, hidden_size):
        super(Sampler, self).__init__()
        self.mlp1 = nn.Linear(feature_size, hidden_size)
        self.mlp2mu = nn.Linear(hidden_size, feature_size)
        self.mlp2var = nn.Linear(hidden_size, feature_size)
        self.tanh = nn.Tanh() 
Example #25
Source File: grassmodel.py    From grass_pytorch with Apache License 2.0
def __init__(self, input_size, feature_size):
        super(BoxEncoder, self).__init__()
        self.encoder = nn.Linear(input_size, feature_size)
        self.tanh = nn.Tanh() 
Example #26
Source File: grassmodel.py    From grass_pytorch with Apache License 2.0
def __init__(self, feature_size, symmetry_size, hidden_size):
        super(SymEncoder, self).__init__()
        self.left = nn.Linear(feature_size, hidden_size)
        self.right = nn.Linear(symmetry_size, hidden_size)
        self.second = nn.Linear(hidden_size, feature_size)
        self.tanh = nn.Tanh() 
Example #27
Source File: modeling.py    From cmrc2019 with Creative Commons Attribution Share Alike 4.0 International
def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh() 
Example #28
Source File: modeling_utils.py    From BERT-Relation-Extraction with Apache License 2.0
def __init__(self, config):
        super(PoolerAnswerClass, self).__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) 
Example #29
Source File: modeling_utils.py    From BERT-Relation-Extraction with Apache License 2.0
def __init__(self, config):
        super(PoolerEndLogits, self).__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense_1 = nn.Linear(config.hidden_size, 1) 
Example #30
Source File: grassmodel.py    From grass_pytorch with Apache License 2.0
def __init__(self, feature_size, hidden_size):
        super(AdjDecoder, self).__init__()
        self.mlp = nn.Linear(feature_size, hidden_size)
        self.mlp_left = nn.Linear(hidden_size, feature_size)
        self.mlp_right = nn.Linear(hidden_size, feature_size)
        self.tanh = nn.Tanh()