Python torch.nn.Softmax() Examples

The following are 30 code examples of torch.nn.Softmax(), drawn from open-source projects; the source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the torch.nn module.
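For reference, here is a minimal, self-contained sketch of typical torch.nn.Softmax usage; the tensor shape and the dim value are illustrative only and not taken from any of the projects below.

import torch
import torch.nn as nn

softmax = nn.Softmax(dim=-1)   # normalize over the last dimension
scores = torch.randn(2, 5)     # e.g. a batch of 2 score vectors
probs = softmax(scores)        # same shape; each row now sums to 1
print(probs.sum(dim=-1))       # tensor([1., 1.])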
Example #1
Source File: seq2sql_condition_predict.py    From SQL_Database_Optimization with BSD 3-Clause "New" or "Revised" License
def __init__(self, N_word, N_h, N_depth, max_col_num, max_tok_num, gpu):
        super(Seq2SQLCondPredictor, self).__init__()
        print "Seq2SQL where prediction"
        self.N_h = N_h
        self.max_tok_num = max_tok_num
        self.max_col_num = max_col_num
        self.gpu = gpu

        self.cond_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h // 2,
                num_layers=N_depth, batch_first=True,
                dropout=0.3, bidirectional=True)
        self.cond_decoder = nn.LSTM(input_size=self.max_tok_num,
                hidden_size=N_h, num_layers=N_depth,
                batch_first=True, dropout=0.3)

        self.cond_out_g = nn.Linear(N_h, N_h)
        self.cond_out_h = nn.Linear(N_h, N_h)
        self.cond_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 1))

        self.softmax = nn.Softmax() 
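This constructor call omits the dim argument (as do Examples #4, #14, #15, and #29 below); recent PyTorch versions accept this but emit a deprecation warning and choose the dimension implicitly. A hedged equivalent with the dimension spelled out follows; the dim=-1 choice is an assumption about how these scores are laid out, not taken from the original project.

        self.softmax = nn.Softmax(dim=-1)  # assumed: scores are normalized over their last dimension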
Example #2
Source File: MultiHeadedAttn.py    From video-caption-openNMT.pytorch with MIT License
def __init__(self, head_count, model_dim, dropout=0.1):
        assert model_dim % head_count == 0
        self.dim_per_head = model_dim // head_count
        self.model_dim = model_dim

        super(MultiHeadedAttention, self).__init__()
        self.head_count = head_count

        self.linear_keys = nn.Linear(model_dim,
                                     head_count * self.dim_per_head)
        self.linear_values = nn.Linear(model_dim,
                                       head_count * self.dim_per_head)
        self.linear_query = nn.Linear(model_dim,
                                      head_count * self.dim_per_head)
        self.sm = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)
        self.final_linear = nn.Linear(model_dim, model_dim) 
Example #3
Source File: ResNeXt2016.py    From Pytorch-Networks with MIT License
def __init__(self,block,block_list,cardinality):
        super(ResNet,self).__init__()
        self.head_conv = nn.Sequential(
            nn.Conv2d(3,64,7,2,3,bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),)
        self.maxpool_1 = nn.MaxPool2d(3,2,1)
        b_ = block.expansion
        self.layer_1 = self._make_layer(block,64,128*b_,block_list[0],1,cardinality)
        self.layer_2 = self._make_layer(block,128*b_,256*b_,block_list[1],2,cardinality)
        self.layer_3 = self._make_layer(block,256*b_,512*b_,block_list[2],2,cardinality)
        self.layer_4 = self._make_layer(block,512*b_,1024*b_,block_list[3],2,cardinality)
        self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
        self.fc_1 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(1024*b_,1000),
            nn.Softmax(dim = 1),)
        self._initialization() 
Example #4
Source File: ReadoutFunction.py    From nmp_qc with MIT License
def r_duvenaud(self, h):
        # layers
        aux = []
        for l in range(len(h)):
            param_sz = self.learn_args[l].size()
            parameter_mat = torch.t(self.learn_args[l])[None, ...].expand(h[l].size(0), param_sz[1],
                                                                                      param_sz[0])

            aux.append(torch.transpose(torch.bmm(parameter_mat, torch.transpose(h[l], 1, 2)), 1, 2))

            for j in range(0, aux[l].size(1)):
                # Mask whole 0 vectors
                aux[l][:, j, :] = nn.Softmax()(aux[l][:, j, :].clone())*(torch.sum(aux[l][:, j, :] != 0, 1) > 0).expand_as(aux[l][:, j, :]).type_as(aux[l])

        aux = torch.sum(torch.sum(torch.stack(aux, 3), 3), 1)
        return self.learn_modules[0](torch.squeeze(aux)) 
Example #5
Source File: softmax_nn.py    From OpenNRE with MIT License
def __init__(self, sentence_encoder, num_class, rel2id):
        """
        Args:
            sentence_encoder: encoder for sentences
            num_class: number of classes
            rel2id: dictionary mapping relation names to ids
        """
        super().__init__()
        self.sentence_encoder = sentence_encoder
        self.num_class = num_class
        self.fc = nn.Linear(self.sentence_encoder.hidden_size, num_class)
        self.softmax = nn.Softmax(-1)
        self.rel2id = rel2id
        self.id2rel = {}
        self.drop = nn.Dropout()
        for rel, id in rel2id.items():
            self.id2rel[id] = rel 
Example #6
Source File: bag_attention.py    From OpenNRE with MIT License
def __init__(self, sentence_encoder, num_class, rel2id):
        """
        Args:
            sentence_encoder: encoder for sentences
            num_class: number of classes
            rel2id: dictionary mapping relation names to ids
        """
        super().__init__()
        self.sentence_encoder = sentence_encoder
        self.num_class = num_class
        self.fc = nn.Linear(self.sentence_encoder.hidden_size, num_class)
        self.softmax = nn.Softmax(-1)
        self.rel2id = rel2id
        self.id2rel = {}
        self.drop = nn.Dropout()
        for rel, id in rel2id.items():
            self.id2rel[id] = rel 
Example #7
Source File: ResNetV2.py    From Pytorch-Networks with MIT License
def __init__(self,block,block_list):
        super(ResNet,self).__init__()
        self.head_conv = nn.Sequential(
            nn.Conv2d(3,64,7,2,3,bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),)
        self.maxpool_1 = nn.MaxPool2d(3,2,1)
        b_ = block.expansion
        self.layer_1 = self._make_layer(block,64,64*b_,block_list[0],1)
        self.layer_2 = self._make_layer(block,64*b_,128*b_,block_list[1],2)
        self.layer_3 = self._make_layer(block,128*b_,256*b_,block_list[2],2)
        self.layer_4 = self._make_layer(block,256*b_,512*b_,block_list[3],2)
        self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
        self.fc_1 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(512*b_,1000),
            nn.Softmax(dim = 1),)
        self._initialization() 
Example #8
Source File: bag_average.py    From OpenNRE with MIT License
def __init__(self, sentence_encoder, num_class, rel2id):
        """
        Args:
            sentence_encoder: encoder for sentences
            num_class: number of classes
            rel2id: dictionary mapping relation names to ids
        """
        super().__init__()
        self.sentence_encoder = sentence_encoder
        self.num_class = num_class
        self.fc = nn.Linear(self.sentence_encoder.hidden_size, num_class)
        self.softmax = nn.Softmax(-1)
        self.rel2id = rel2id
        self.id2rel = {}
        self.drop = nn.Dropout()
        for rel, id in rel2id.items():
            self.id2rel[id] = rel 
Example #9
Source File: DenseNet2016.py    From Pytorch-Networks with MIT License
def __init__(self,k,block_list,num_init_features=64, bn_size=4, 
                 drop_rate=0, memory_efficient=False):
        super(DenseNet,self).__init__()
        self.head_conv = nn.Sequential(
            nn.Conv2d(3,num_init_features,7,2,3,bias=False),
            nn.BatchNorm2d(num_init_features),
            nn.ReLU(inplace=True),)
        self.maxpool_1 = nn.MaxPool2d(3,2,1)
        self.dense_body, self.final_channels = self._make_layers(num_init_features,
                                  bn_size,block_list,k,drop_rate, memory_efficient)
        self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
        self.fc_1 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(self.final_channels,1000),
            nn.Softmax(dim = 1),)
        self._initialization() 
Example #10
Source File: MobileNet.py    From Pytorch-Networks with MIT License
def __init__(self,):
        super(MobileNet_V1,self).__init__()
        self.conv = nn.Sequential(BasicConv(3,32,3,2,1),
             DPConv(32,64,1),
             DPConv(64,128,2),
             DPConv(128,128,1),
             DPConv(128,256,2),
             DPConv(256,256,1),
             DPConv(256,512,2),

             DPConv(512,512,1),
             DPConv(512,512,1),
             DPConv(512,512,1),
             DPConv(512,512,1),
             DPConv(512,512,1),

             DPConv(512,1024,2),
             DPConv(1024,1024,1),)
        
        self.final = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(1024,1000),
            nn.Softmax(dim=1)
        ) 
Example #11
Source File: helper.py    From torchscope with Apache License 2.0
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        return 0 
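The per-module helpers such as compute_Softmax_madd are defined elsewhere in torchscope and are not shown here. As a rough illustration only (a hypothetical sketch, not the library's implementation), a softmax over n values needs about n exponentiations, n - 1 additions for the normalizer, and n divisions:

import torch.nn as nn

def softmax_madd_sketch(module: nn.Softmax, inp, out):
    # Hypothetical count, not torchscope's compute_Softmax_madd:
    # one exp per element, (n - 1) adds for the sum, one divide per element.
    n = inp.size(-1)            # assume the softmax runs over the last dimension
    rows = inp.numel() // n     # number of softmax rows in the input
    return rows * (n + (n - 1) + n)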
Example #12
Source File: ssd.py    From CSD-SSD with MIT License
def __init__(self, phase, size, base, extras, head, num_classes):
        super(SSD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        if(size==300):
            self.cfg = (coco, voc300)[num_classes == 21]
        else:
            self.cfg = (coco, voc512)[num_classes == 21]
        self.priorbox = PriorBox(self.cfg)
        self.priors = Variable(self.priorbox.forward(), volatile=True)
        self.size = size

        # SSD network
        self.vgg = nn.ModuleList(base)
        # Layer learns to scale the l2 normalized features from conv4_3
        self.L2Norm = L2Norm(512, 20)
        self.extras = nn.ModuleList(extras)

        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])

        if phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
            self.detect = Detect(num_classes, 0, 200, 0.01, 0.45) 
Example #13
Source File: Mem2Seq.py    From ConvLab with MIT License
def __init__(self, vocab, embedding_dim, hop, dropout, unk_mask):
        super(DecoderMemNN, self).__init__()
        self.num_vocab = vocab
        self.max_hops = hop
        self.embedding_dim = embedding_dim
        self.dropout = dropout
        self.unk_mask = unk_mask
        for hop in range(self.max_hops+1):
            C = nn.Embedding(self.num_vocab, embedding_dim, padding_idx=PAD_token)
            C.weight.data.normal_(0, 0.1)
            self.add_module("C_{}".format(hop), C)
        self.C = AttrProxy(self, "C_")
        self.softmax = nn.Softmax(dim=1)
        self.W = nn.Linear(embedding_dim,1)
        self.W1 = nn.Linear(2*embedding_dim,self.num_vocab)
        self.gru = nn.GRU(embedding_dim, embedding_dim, dropout=dropout) 
Example #14
Source File: aggregator_predict.py    From SQL_Database_Optimization with BSD 3-Clause "New" or "Revised" License
def __init__(self, N_word, N_h, N_depth, use_ca):
        super(AggPredictor, self).__init__()
        self.use_ca = use_ca

        self.agg_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h // 2,
                num_layers=N_depth, batch_first=True,
                dropout=0.3, bidirectional=True)
        if use_ca:
            print "Using column attention on aggregator predicting"
            self.agg_col_name_enc = nn.LSTM(input_size=N_word,
                    hidden_size=N_h/2, num_layers=N_depth,
                    batch_first=True, dropout=0.3, bidirectional=True)
            self.agg_att = nn.Linear(N_h, N_h)
        else:
            print "Not using column attention on aggregator predicting"
            self.agg_att = nn.Linear(N_h, 1)
        self.agg_out = nn.Sequential(nn.Linear(N_h, N_h),
                nn.Tanh(), nn.Linear(N_h, 6))
        self.softmax = nn.Softmax() 
Example #15
Source File: selection_predict.py    From SQL_Database_Optimization with BSD 3-Clause "New" or "Revised" License
def __init__(self, N_word, N_h, N_depth, max_tok_num, use_ca):
        super(SelPredictor, self).__init__()
        self.use_ca = use_ca
        self.max_tok_num = max_tok_num
        self.sel_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h // 2,
                num_layers=N_depth, batch_first=True,
                dropout=0.3, bidirectional=True)
        if use_ca:
            print "Using column attention on selection predicting"
            self.sel_att = nn.Linear(N_h, N_h)
        else:
            print "Not using column attention on selection predicting"
            self.sel_att = nn.Linear(N_h, 1)
        self.sel_col_name_enc = nn.LSTM(input_size=N_word, hidden_size=N_h/2,
                num_layers=N_depth, batch_first=True,
                dropout=0.3, bidirectional=True)
        self.sel_out_K = nn.Linear(N_h, N_h)
        self.sel_out_col = nn.Linear(N_h, N_h)
        self.sel_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 1))
        self.softmax = nn.Softmax() 
Example #16
Source File: rfb.py    From ssds.pytorch with MIT License
def __init__(self, base, extras, norm, head, feature_layer, num_classes):
        super(RFB, self).__init__()
        self.num_classes = num_classes
        # RFB network
        self.base = nn.ModuleList(base)
        self.norm = nn.ModuleList(norm)
        self.extras = nn.ModuleList(extras)
        
        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        self.softmax = nn.Softmax(dim=-1)

        self.feature_layer = feature_layer[0]
        self.indicator = 0
        for layer in self.feature_layer:
            if isinstance(layer, int):
                continue
            elif layer == '' or layer == 'S':
                break
            else:
                self.indicator += 1 
Example #17
Source File: yolo.py    From ssds.pytorch with MIT License
def __init__(self, base, extras, head, feature_layer, num_classes):
        super(YOLO, self).__init__()
        self.num_classes = num_classes
        # SSD network
        self.base = nn.ModuleList(base)
        self.extras = nn.ModuleList(extras)

        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        self.softmax = nn.Softmax(dim=-1)

        self.feature_layer = [ f for feature in feature_layer[0] for f in feature]
        # self.feature_index = [ len(feature) for feature in feature_layer[0]]
        self.feature_index = list()
        s = -1
        for feature in feature_layer[0]:
            s += len(feature)
            self.feature_index.append(s) 
Example #18
Source File: rfb_lite.py    From ssds.pytorch with MIT License
def __init__(self, base, extras, head, feature_layer, num_classes):
        super(RFBLite, self).__init__()
        self.num_classes = num_classes
        # rfb network
        self.base = nn.ModuleList(base)
        self.norm = BasicRFB_a_lite(feature_layer[1][0],feature_layer[1][0],stride = 1,scale=1.0)
        self.extras = nn.ModuleList(extras)

        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        self.softmax = nn.Softmax(dim=-1)


        self.feature_layer = feature_layer[0]
        self.indicator = 0
        for layer in self.feature_layer:
            if isinstance(layer, int):
                continue
            elif layer == '' or layer == 'S':
                break
            else:
                self.indicator += 1 
Example #19
Source File: trade_utils.py    From ConvLab with MIT License
def __init__(self, lang, shared_emb, vocab_size, hidden_size, dropout, slots, nb_gate):
        super(Generator, self).__init__()
        self.vocab_size = vocab_size
        self.lang = lang
        self.embedding = shared_emb
        self.dropout_layer = nn.Dropout(dropout)
        self.gru = nn.GRU(hidden_size, hidden_size, dropout=dropout)
        self.nb_gate = nb_gate
        self.hidden_size = hidden_size
        self.W_ratio = nn.Linear(3 * hidden_size, 1)
        self.softmax = nn.Softmax(dim=1)
        self.sigmoid = nn.Sigmoid()
        self.slots = slots

        self.W_gate = nn.Linear(hidden_size, nb_gate)

        # Create independent slot embeddings
        self.slot_w2i = {}
        for slot in self.slots:
            if slot.split("-")[0] not in self.slot_w2i.keys():
                self.slot_w2i[slot.split("-")[0]] = len(self.slot_w2i)
            if slot.split("-")[1] not in self.slot_w2i.keys():
                self.slot_w2i[slot.split("-")[1]] = len(self.slot_w2i)
        self.Slot_emb = nn.Embedding(len(self.slot_w2i), hidden_size)
        self.Slot_emb.weight.data.normal_(0, 0.1) 
Example #20
Source File: gpt.py    From comet-commonsense with Apache License 2.0
def _attn(self, q, k, v, sequence_mask):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))

        b_subset = self.b[:, :, :w.size(-2), :w.size(-1)]

        if sequence_mask is not None:
            b_subset = b_subset * sequence_mask.view(
                sequence_mask.size(0), 1, -1)
            b_subset = b_subset.permute(1, 0, 2, 3)

        w = w * b_subset + -1e9 * (1 - b_subset)
        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        return torch.matmul(w, v) 
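Example #20 constructs the softmax module inline and applies it immediately, nn.Softmax(dim=-1)(w); this is equivalent to the functional form. A small self-contained check (the tensor shape is illustrative, not taken from the original project):

import torch
import torch.nn as nn
import torch.nn.functional as F

w = torch.randn(2, 4, 8, 8)        # e.g. (batch, heads, query, key) attention scores
a = nn.Softmax(dim=-1)(w)          # module built inline, as in the example above
b = F.softmax(w, dim=-1)           # functional equivalent
assert torch.allclose(a, b)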
Example #21
Source File: GlobalAttention.py    From video-caption-openNMT.pytorch with MIT License
def __init__(self, dim, coverage=False, attn_type="dot"):
        super(GlobalAttention, self).__init__()

        self.dim = dim
        self.attn_type = attn_type
        assert (self.attn_type in ["dot", "general", "mlp"]), (
                "Please select a valid attention type.")

        if self.attn_type == "general":
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif self.attn_type == "mlp":
            self.linear_context = nn.Linear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = nn.Linear(dim, 1, bias=False)
        # mlp wants it with bias
        out_bias = self.attn_type == "mlp"
        self.linear_out = nn.Linear(dim*2, dim, bias=out_bias)

        self.sm = nn.Softmax(dim=-1)
        self.tanh = nn.Tanh()

        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False) 
Example #22
Source File: gat_layers.py    From DeepInf with MIT License
def __init__(self, n_head, f_in, f_out, attn_dropout, bias=True):
        super(BatchMultiHeadGraphAttention, self).__init__()
        self.n_head = n_head
        self.w = Parameter(torch.Tensor(n_head, f_in, f_out))
        self.a_src = Parameter(torch.Tensor(n_head, f_out, 1))
        self.a_dst = Parameter(torch.Tensor(n_head, f_out, 1))

        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(attn_dropout)
        if bias:
            self.bias = Parameter(torch.Tensor(f_out))
            init.constant_(self.bias, 0)
        else:
            self.register_parameter('bias', None)

        init.xavier_uniform_(self.w)
        init.xavier_uniform_(self.a_src)
        init.xavier_uniform_(self.a_dst) 
Example #23
Source File: fssd.py    From ssds.pytorch with MIT License
def __init__(self, base, extras, head, features, feature_layer, num_classes):
        super(FSSD, self).__init__()
        self.num_classes = num_classes
        # SSD network
        self.base = nn.ModuleList(base)
        self.extras = nn.ModuleList(extras)
        self.feature_layer = feature_layer[0][0]
        self.transforms = nn.ModuleList(features[0])
        self.pyramids = nn.ModuleList(features[1])
        # print(self.base)
        # Layer learns to scale the l2 normalized features from conv4_3
        self.norm = nn.BatchNorm2d(int(feature_layer[0][1][-1]/2)*len(self.transforms),affine=True)
        # print(self.extras)

        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        # print(self.loc)

        self.softmax = nn.Softmax(dim=-1) 
Example #24
Source File: fssd_lite.py    From ssds.pytorch with MIT License
def __init__(self, base, extras, head, features, feature_layer, num_classes):
        super(FSSDLite, self).__init__()
        self.num_classes = num_classes
        # SSD network
        self.base = nn.ModuleList(base)
        self.extras = nn.ModuleList(extras)
        self.feature_layer = feature_layer[0][0]
        self.transforms = nn.ModuleList(features[0])
        self.pyramids = nn.ModuleList(features[1])
        # print(self.base)
        self.norm = nn.BatchNorm2d(int(feature_layer[0][1][-1]/2)*len(self.transforms),affine=True)
        # print(self.extras)

        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        # print(self.loc)

        self.softmax = nn.Softmax(dim=-1) 
Example #25
Source File: gat_layers.py    From DeepInf with MIT License
def __init__(self, n_head, f_in, f_out, attn_dropout, bias=True):
        super(MultiHeadGraphAttention, self).__init__()
        self.n_head = n_head
        self.w = Parameter(torch.Tensor(n_head, f_in, f_out))
        self.a_src = Parameter(torch.Tensor(n_head, f_out, 1))
        self.a_dst = Parameter(torch.Tensor(n_head, f_out, 1))

        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(attn_dropout)

        if bias:
            self.bias = Parameter(torch.Tensor(f_out))
            init.constant_(self.bias, 0)
        else:
            self.register_parameter('bias', None)

        init.xavier_uniform_(self.w)
        init.xavier_uniform_(self.a_src)
        init.xavier_uniform_(self.a_dst) 
Example #26
Source File: operators.py    From Fast_Seg with Apache License 2.0
def __init__(self, inplane, plane):
        super(A2Block, self).__init__()
        self.down = nn.Conv2d(inplane, plane, 1)
        self.up = nn.Conv2d(plane, inplane, 1)
        self.gather_down = nn.Conv2d(inplane, plane, 1)
        self.distribue_down = nn.Conv2d(inplane, plane, 1)
        self.softmax = nn.Softmax(dim=-1) 
Example #27
Source File: pose_hrnet.py    From PoseWarper with Apache License 2.0
def _apply_channel_softmax(self):
       return nn.Softmax(dim=1) 
Example #28
Source File: pose_hrnet_PoseAgg.py    From PoseWarper with Apache License 2.0
def _apply_channel_softmax(self):
       return nn.Softmax(dim=1) 
Example #29
Source File: lstm.py    From dogTorch with MIT License
def __init__(self, args):
        super(Lstm, self).__init__()

        self.class_weights = args.dataset.CLASS_WEIGHTS[torch.LongTensor(
            args.imus)]
        self.num_classes = args.num_classes
        self.imus = args.imus
        self.output_length = args.output_length
        self.num_layers = args.num_layers
        self.hidden_size = args.hidden_size
        self.imu_feature = args.imu_feature
        self.use_attention = args.use_attention
        self.regression = args.regression

        self.encoder = nn.LSTM(args.hidden_size, args.hidden_size,
                               args.num_layers)
        if self.use_attention:
            self.embedding_output = nn.Linear(
                args.imu_feature + args.hidden_size, args.hidden_size)
        else:
            self.embedding_output = nn.Linear(args.imu_feature,
                                              args.hidden_size)
        self.decoder = nn.LSTMCell(args.hidden_size, args.hidden_size,
                                   args.num_layers)
        self.out = nn.Linear(args.hidden_size, args.imu_feature)
        if self.use_attention:
            self.attender = nn.Linear(args.hidden_size, args.input_length)

        for i in self.imus:
            setattr(self, 'imu{}'.format(i),
                    nn.Linear(args.imu_feature, args.num_classes))

        self.dropout = nn.Dropout()
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax()
        self.hidden_linear = nn.Linear(args.hidden_size, args.hidden_size)
        self.cell_linear = nn.Linear(args.hidden_size, args.hidden_size) 
Example #30
Source File: ssd.py    From ssds.pytorch with MIT License
def __init__(self, base, extras, head, feature_layer, num_classes):
        super(SSD, self).__init__()
        self.num_classes = num_classes
        # SSD network
        self.base = nn.ModuleList(base)
        self.norm = L2Norm(feature_layer[1][0], 20)
        self.extras = nn.ModuleList(extras)

        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        self.softmax = nn.Softmax(dim=-1)

        self.feature_layer = feature_layer[0]