Python torch.nn.ModuleList() Examples

The following are 30 code examples of torch.nn.ModuleList(), drawn from open-source projects; the source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the torch.nn module.
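Before the examples, a quick note on why nn.ModuleList exists at all: unlike a plain Python list, a ModuleList registers its contents as submodules, so their parameters appear in .parameters(), move with .to() and .cuda(), and are saved in the state dict. A minimal sketch (the StackedMLP class and its dims argument are illustrative, not taken from any project below):

import torch
import torch.nn as nn

class StackedMLP(nn.Module):
    def __init__(self, dims):
        super().__init__()
        # Registered as submodules: visible to .parameters(),
        # .state_dict(), .to(), etc. A plain list would not be.
        self.layers = nn.ModuleList(
            nn.Linear(dims[i], dims[i + 1]) for i in range(len(dims) - 1)
        )

    def forward(self, x):
        # A ModuleList has no forward of its own; iterate explicitly.
        for layer in self.layers:
            x = torch.relu(layer(x))
        return x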
Example #1
Source File: rfp.py    From mmdetection with Apache License 2.0
def __init__(self, in_channels, out_channels, dilations=(1, 3, 6, 1)):
        super().__init__()
        assert dilations[-1] == 1
        self.aspp = nn.ModuleList()
        for dilation in dilations:
            kernel_size = 3 if dilation > 1 else 1
            padding = dilation if dilation > 1 else 0
            conv = nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size=kernel_size,
                stride=1,
                dilation=dilation,
                padding=padding,
                bias=True)
            self.aspp.append(conv)
        self.gap = nn.AdaptiveAvgPool2d(1)
        self.init_weights() 
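A ModuleList such as self.aspp is consumed by explicit iteration in forward. The following is a plausible forward for this block, written as a sketch rather than the project's actual code; it assumes the last branch (dilation 1, 1x1 kernel) runs on the globally pooled features, which the assert above makes possible:

# assumes: import torch; import torch.nn.functional as F
def forward(self, x):
    avg_x = self.gap(x)
    out = []
    for idx, conv in enumerate(self.aspp):
        inp = avg_x if idx == len(self.aspp) - 1 else x
        out.append(F.relu_(conv(inp)))
    out[-1] = out[-1].expand_as(out[-2])  # broadcast the pooled branch
    return torch.cat(out, dim=1)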
Example #2
Source File: CPN2017.py    From Pytorch-Networks with MIT License
def __init__(self, channel_settings, output_shape, num_class):
        super(globalNet, self).__init__()
        self.channel_settings = channel_settings
        laterals, upsamples, predict = [], [], []
        for i in range(len(channel_settings)):
            laterals.append(self._lateral(channel_settings[i]))
            predict.append(self._predict(output_shape, num_class))
            if i != len(channel_settings) - 1:
                upsamples.append(self._upsample())
        self.laterals = nn.ModuleList(laterals)
        self.upsamples = nn.ModuleList(upsamples)
        self.predict = nn.ModuleList(predict)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example #3
Source File: model.py    From easy-faster-rcnn.pytorch with MIT License
def __init__(self, backbone: BackboneBase, num_classes: int, pooler_mode: Pooler.Mode,
                 anchor_ratios: List[Tuple[int, int]], anchor_sizes: List[int],
                 rpn_pre_nms_top_n: int, rpn_post_nms_top_n: int,
                 anchor_smooth_l1_loss_beta: Optional[float] = None, proposal_smooth_l1_loss_beta: Optional[float] = None):
        super().__init__()

        self.features, hidden, num_features_out, num_hidden_out = backbone.features()
        self._bn_modules = nn.ModuleList([it for it in self.features.modules() if isinstance(it, nn.BatchNorm2d)] +
                                         [it for it in hidden.modules() if isinstance(it, nn.BatchNorm2d)])

        # NOTE: It is crucial to freeze the batch normalization modules when training
        #       with only a few images per batch. This is done in two steps:
        #       (1) Switch them to `eval` mode
        #       (2) Disable their gradients (this step is moved into `forward`)
        for bn_module in self._bn_modules:
            for parameter in bn_module.parameters():
                parameter.requires_grad = False

        self.rpn = RegionProposalNetwork(num_features_out, anchor_ratios, anchor_sizes, rpn_pre_nms_top_n, rpn_post_nms_top_n, anchor_smooth_l1_loss_beta)
        self.detection = Model.Detection(pooler_mode, hidden, num_hidden_out, num_classes, proposal_smooth_l1_loss_beta) 
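The freezing described in the NOTE can be packaged as a small helper; this is a generic sketch of the idea, not code from the project:

import torch.nn as nn

def freeze_bn_modules(bn_modules):
    # Fix the running statistics (eval mode) and stop gradient flow
    # through the affine parameters of the collected BatchNorm2d modules.
    for bn in bn_modules:
        bn.eval()
        for p in bn.parameters():
            p.requires_grad = False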
Example #4
Source File: deeplab_resnet_3D.py    From pytorch-mri-segmentation-3D with MIT License
def __init__(self, dilation_series, padding_series, NoLabels):
        super(Classifier_Module, self).__init__()
        self.conv3d_list = nn.ModuleList()
        self.bn3d_list = nn.ModuleList()
        for dilation, padding in zip(dilation_series, padding_series):
            self.conv3d_list.append(nn.Conv3d(2048, 256, kernel_size=3, stride=1,
                                              padding=padding, dilation=dilation, bias=True))
            self.bn3d_list.append(nn.BatchNorm3d(256, affine=affine_par))
        self.num_concats = len(self.conv3d_list) + 2
        # add global pooling, add batchnorm
        self.conv1x1_1 = nn.Conv3d(2048, 256, kernel_size=1, stride=1)
        self.conv1x1_2 = nn.Conv3d(2048, 256, kernel_size=1, stride=1)
        self.conv1x1_3 = nn.Conv3d(256 * self.num_concats, 256, kernel_size=1, stride=1)
        self.conv1x1_4 = nn.Conv3d(256, NoLabels, kernel_size=1, stride=1)

        self.bn1 = nn.BatchNorm3d(256, affine=affine_par)
        self.bn2 = nn.BatchNorm3d(256 * self.num_concats, affine=affine_par)
        self.bn3 = nn.BatchNorm3d(256, affine=affine_par)
        # global avg pool: input 1 x 512 x dim1 x dim2 x dim3,
        # output 1 x 512 x 1 x 1 x 1 (XXX check)

        for m in self.conv3d_list:
            m.weight.data.normal_(0, 0.01)
Example #5
Source File: anchor_free_head.py    From mmdetection with Apache License 2.0
def _init_cls_convs(self):
        """Initialize classification conv layers of the head."""
        self.cls_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            if self.dcn_on_last_conv and i == self.stacked_convs - 1:
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.conv_bias)) 
Example #6
Source File: exp_net_3D.py    From pytorch-mri-segmentation-3D with MIT License
def __init__(self, dilation_series, padding_series, inplanes, midplanes, outplanes):
        super(ASPP_Module, self).__init__()
        self.conv3d_list = nn.ModuleList()
        self.bn3d_list = nn.ModuleList()
        for dilation, padding in zip(dilation_series, padding_series):
            self.conv3d_list.append(nn.Conv3d(inplanes, midplanes, kernel_size=3, stride=1,
                                              padding=padding, dilation=dilation, bias=True))
            self.bn3d_list.append(nn.BatchNorm3d(midplanes, affine=affine_par))
        self.num_concats = len(self.conv3d_list) + 2
        # add global pooling, add batchnorm
        self.conv1x1_1 = nn.Conv3d(inplanes, midplanes, kernel_size=1, stride=1)
        self.conv1x1_2 = nn.Conv3d(inplanes, midplanes, kernel_size=1, stride=1)
        self.conv1x1_3 = nn.Conv3d(midplanes * self.num_concats, outplanes, kernel_size=1, stride=1)

        self.relu = nn.PReLU()

        self.bn1 = nn.BatchNorm3d(midplanes, affine=affine_par)
        self.bn2 = nn.BatchNorm3d(midplanes * self.num_concats, affine=affine_par)
        self.bn3 = nn.BatchNorm3d(midplanes, affine=affine_par)
Example #7
Source File: ReadoutFunction.py    From nmp_qc with MIT License
def init_ggnn(self, params):
        learn_args = []
        learn_modules = []
        args = {}

        # i
        learn_modules.append(NNet(n_in=2*params['in'], n_out=params['target']))

        # j
        learn_modules.append(NNet(n_in=params['in'], n_out=params['target']))

        args['out'] = params['target']

        return nn.ParameterList(learn_args), nn.ModuleList(learn_modules), args


    # Battaglia et al. (2016), Interaction Networks 
Example #8
Source File: ReadoutFunction.py    From nmp_qc with MIT License
def init_duvenaud(self, params):
        learn_args = []
        learn_modules = []
        args = {}

        args['out'] = params['out']

        # Define a parameter matrix W for each layer.
        for l in range(params['layers']):
            learn_args.append(nn.Parameter(torch.randn(params['in'][l], params['out'])))

        # learn_modules.append(nn.Linear(params['out'], params['target']))

        learn_modules.append(NNet(n_in=params['out'], n_out=params['target']))
        return nn.ParameterList(learn_args), nn.ModuleList(learn_modules), args

    # GG-NN, Li et al. 
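The (ParameterList, ModuleList, args) return convention used throughout nmp_qc registers both raw tensors and submodules on the owning nn.Module. A minimal illustration of the same pattern (the class and argument names are invented for the sketch):

import torch
import torch.nn as nn

class ReadoutLike(nn.Module):
    def __init__(self, in_sizes, out_size):
        super().__init__()
        # nn.ParameterList registers bare tensors as learnable weights,
        # just as nn.ModuleList registers submodules.
        self.learn_args = nn.ParameterList(
            nn.Parameter(torch.randn(n, out_size)) for n in in_sizes
        )
        self.learn_modules = nn.ModuleList([nn.Linear(out_size, 1)])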
Example #9
Source File: rfp.py    From mmdetection with Apache License 2.0
def __init__(self,
                 rfp_steps,
                 rfp_backbone,
                 aspp_out_channels,
                 aspp_dilations=(1, 3, 6, 1),
                 **kwargs):
        super().__init__(**kwargs)
        self.rfp_steps = rfp_steps
        self.rfp_modules = nn.ModuleList()
        for rfp_idx in range(1, rfp_steps):
            rfp_module = build_backbone(rfp_backbone)
            self.rfp_modules.append(rfp_module)
        self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels,
                             aspp_dilations)
        self.rfp_weight = nn.Conv2d(
            self.out_channels,
            1,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True) 
Example #10
Source File: MPNN_IntNet.py    From nmp_qc with MIT License
def __init__(self, in_n, out_message, out_update, l_target, type='regression'):
        super(MpnnIntNet, self).__init__()

        n_layers = len(out_update)

        # Define message 1 & 2
        self.m = nn.ModuleList([MessageFunction('intnet', args={'in': 2*in_n[0] + in_n[1], 'out': out_message[i]})
                                if i == 0 else
                                MessageFunction('intnet', args={'in': 2*out_update[i-1] + in_n[1], 'out': out_message[i]})
                                for i in range(n_layers)])

        # Define Update 1 & 2
        self.u = nn.ModuleList([UpdateFunction('intnet', args={'in': in_n[0]+out_message[i], 'out': out_update[i]})
                                if i == 0 else
                                UpdateFunction('intnet', args={'in': out_update[i-1]+out_message[i], 'out': out_update[i]})
                                for i in range(n_layers)])

        # Define Readout
        self.r = ReadoutFunction('intnet', args={'in': out_update[-1], 'target': l_target})

        self.type = type 
Example #11
Source File: MPNN.py    From nmp_qc with MIT License
def __init__(self, in_n, hidden_state_size, message_size, n_layers, l_target, type='regression'):
        super(MPNN, self).__init__()

        # Define message
        self.m = nn.ModuleList(
            [MessageFunction('mpnn', args={'edge_feat': in_n[1], 'in': hidden_state_size, 'out': message_size})])

        # Define Update
        self.u = nn.ModuleList([UpdateFunction('mpnn',
                                               args={'in_m': message_size,
                                                     'out': hidden_state_size})])

        # Define Readout
        self.r = ReadoutFunction('mpnn',
                                 args={'in': hidden_state_size,
                                       'target': l_target})

        self.type = type

        self.args = {}
        self.args['out'] = hidden_state_size

        self.n_layers = n_layers 
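Note that self.m and self.u above each hold a single module even though n_layers may be larger: the same message and update functions are reused on every pass, so their weights are tied across layers. A self-contained sketch of that pattern:

import torch.nn as nn

class TiedStack(nn.Module):
    # One module stored in a ModuleList and applied n_layers times;
    # every pass shares the same weights.
    def __init__(self, dim, n_layers):
        super().__init__()
        self.step = nn.ModuleList([nn.Linear(dim, dim)])
        self.n_layers = n_layers

    def forward(self, x):
        for _ in range(self.n_layers):
            x = self.step[0](x).relu()
        return x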
Example #12
Source File: UpdateFunction.py    From nmp_qc with MIT License
def __set_update(self, update_def, args):
        self.u_definition = update_def.lower()

        self.u_function = {
                    'duvenaud':         self.u_duvenaud,
                    'ggnn':             self.u_ggnn,
                    'intnet':           self.u_intnet,
                    'mpnn':             self.u_mpnn
                }.get(self.u_definition, None)

        if self.u_function is None:
            print('WARNING!: Update Function has not been set correctly\n\tIncorrect definition ' + update_def)

        init_parameters = {
            'duvenaud':         self.init_duvenaud,
            'ggnn':             self.init_ggnn,
            'intnet':           self.init_intnet,
            'mpnn':             self.init_mpnn
        }.get(self.u_definition, lambda x: (nn.ParameterList([]), nn.ModuleList([]), {}))

        self.learn_args, self.learn_modules, self.args = init_parameters(args)

    # Get the name of the used update function 
Example #13
Source File: anchor_free_head.py    From mmdetection with Apache License 2.0
def _init_reg_convs(self):
        """Initialize bbox regression conv layers of the head."""
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            if self.dcn_on_last_conv and i == self.stacked_convs - 1:
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.conv_bias)) 
Example #14
Source File: GNNlikeCNN2015.py    From Pytorch-Networks with MIT License
def __init__(self, Gnn_layers, use_gpu):
        super().__init__()
        self.gnn_layers = nn.ModuleList([
            nn.Conv2d(in_channels=4, out_channels=16, kernel_size=(5, 5), stride=1, padding=2),
            nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(5, 5), stride=1, padding=2),
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(5, 5), stride=1, padding=2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(5, 5), stride=1, padding=2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(5, 5), stride=1, padding=2),
            nn.Conv2d(in_channels=32, out_channels=128, kernel_size=(1, 1), stride=1, padding=0),
            nn.Conv2d(in_channels=128, out_channels=2, kernel_size=(1, 1), stride=1, padding=0),
        ])
        self.gnn_actfs = nn.ModuleList([nn.ReLU() for _ in range(7)])
        self.use_gpu = use_gpu
        self._initialize_weights_norm() 
Example #15
Source File: MPNN_Duvenaud.py    From nmp_qc with MIT License
def __init__(self, d, in_n, out_update, hidden_state_readout, l_target, type='regression'):
        super(MpnnDuvenaud, self).__init__()

        n_layers = len(out_update)

        # Define message 1 & 2
        self.m = nn.ModuleList([MessageFunction('duvenaud') for _ in range(n_layers)])

        # Define Update 1 & 2
        self.u = nn.ModuleList([UpdateFunction('duvenaud', args={'deg': d, 'in': self.m[i].get_out_size(in_n[0], in_n[1]), 'out': out_update[0]}) if i == 0 else
                                UpdateFunction('duvenaud', args={'deg': d, 'in': self.m[i].get_out_size(out_update[i-1], in_n[1]), 'out': out_update[i]}) for i in range(n_layers)])

        # Define Readout
        self.r = ReadoutFunction('duvenaud',
                                 args={'layers': len(self.m) + 1,
                                       'in': [in_n[0] if i == 0 else out_update[i-1] for i in range(n_layers+1)],
                                       'out': hidden_state_readout,
                                       'target': l_target})

        self.type = type 
Example #16
Source File: erfnet_imagenet.py    From Pytorch-Project-Template with MIT License
def __init__(self):
        super().__init__()
        self.initial_block = DownsamplerBlock(3, 16)

        self.layers = nn.ModuleList()

        self.layers.append(DownsamplerBlock(16, 64))

        for x in range(0, 5):  # 5 times
            self.layers.append(non_bottleneck_1d(64, 0.1, 1))

        self.layers.append(DownsamplerBlock(64, 128))

        for x in range(0, 2):  # 2 times
            self.layers.append(non_bottleneck_1d(128, 0.1, 2))
            self.layers.append(non_bottleneck_1d(128, 0.1, 4))
            self.layers.append(non_bottleneck_1d(128, 0.1, 8))
            self.layers.append(non_bottleneck_1d(128, 0.1, 16)) 
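Since a ModuleList defines no forward of its own, an encoder like this one loops over self.layers explicitly. A plausible forward, sketched here under that assumption rather than copied from the project:

def forward(self, x):
    x = self.initial_block(x)
    for layer in self.layers:
        x = layer(x)
    return x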
Example #17
Source File: models.py    From cvpr2018-hnd with MIT License
def init_truncated_normal(model, aux_str=''):
    if model is None: return None
    init_path = '{path}/{in_dim:d}_{out_dim:d}{aux_str}.pth' \
                .format(path=path, in_dim=model.in_features, out_dim=model.out_features, aux_str=aux_str)
    if os.path.isfile(init_path):
        model.load_state_dict(torch.load(init_path))
        print('load init weight: {init_path}'.format(init_path=init_path))
    else:
        if isinstance(model, nn.ModuleList):
            [truncated_normal(sub) for sub in model]
        else:
            truncated_normal(model)
        print('generate init weight: {init_path}'.format(init_path=init_path))
        torch.save(model.state_dict(), init_path)
        print('save init weight: {init_path}'.format(init_path=init_path))
    
    return model 
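The truncated_normal helper called above is not shown in this snippet. One plausible stand-in, using the truncated-normal initializer that newer PyTorch versions ship (the std value is an assumption):

import torch.nn as nn

def truncated_normal(layer, std=0.01):
    # Normal init with values resampled to lie within +/- 2 std;
    # a hypothetical replacement for the project's own helper.
    nn.init.trunc_normal_(layer.weight, mean=0.0, std=std, a=-2 * std, b=2 * std)
    if layer.bias is not None:
        nn.init.zeros_(layer.bias)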
Example #18
Source File: GST.py    From GST-Tacotron with MIT License
def __init__(self):

        super().__init__()
        K = len(hp.ref_enc_filters)
        filters = [1] + hp.ref_enc_filters
        convs = [nn.Conv2d(in_channels=filters[i],
                           out_channels=filters[i + 1],
                           kernel_size=(3, 3),
                           stride=(2, 2),
                           padding=(1, 1)) for i in range(K)]
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList([nn.BatchNorm2d(num_features=hp.ref_enc_filters[i]) for i in range(K)])

        out_channels = self.calculate_channels(hp.n_mels, 3, 2, 1, K)
        self.gru = nn.GRU(input_size=hp.ref_enc_filters[-1] * out_channels,
                          hidden_size=hp.E // 2,
                          batch_first=True) 
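self.calculate_channels tracks how the spectrogram's frequency axis shrinks through K stride-2 convolutions so that the GRU input size can be computed. A sketch of what such a helper computes (the method body is not shown in this snippet, so this is an assumption based on the standard convolution output-size formula):

def calculate_channels(self, length, kernel_size, stride, pad, n_convs):
    # Apply the conv output-size formula once per convolution.
    for _ in range(n_convs):
        length = (length - kernel_size + 2 * pad) // stride + 1
    return length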
Example #19
Source File: MPNN_GGNN.py    From nmp_qc with MIT License
def __init__(self, e, hidden_state_size, message_size, n_layers, l_target, type='regression'):
        super(MpnnGGNN, self).__init__()

        # Define message
        self.m = nn.ModuleList([MessageFunction('ggnn', args={'e_label': e, 'in': hidden_state_size, 'out': message_size})])

        # Define Update
        self.u = nn.ModuleList([UpdateFunction('ggnn',
                                                args={'in_m': message_size,
                                                'out': hidden_state_size})])

        # Define Readout
        self.r = ReadoutFunction('ggnn',
                                 args={'in': hidden_state_size,
                                       'target': l_target})

        self.type = type

        self.args = {}
        self.args['out'] = hidden_state_size

        self.n_layers = n_layers 
Example #20
Source File: Network.py    From GST-Tacotron with MIT License
def __init__(self):
        super().__init__()
        self.prenet = PreNet(in_features=hp.E)  # [N, T, E//2]

        self.conv1d_bank = Conv1dBank(K=hp.K, in_channels=hp.E // 2, out_channels=hp.E // 2)  # [N, T, E//2 * K]

        self.conv1d_1 = Conv1d(in_channels=hp.K * hp.E // 2, out_channels=hp.E // 2, kernel_size=3)  # [N, T, E//2]
        self.conv1d_2 = Conv1d(in_channels=hp.E // 2, out_channels=hp.E // 2, kernel_size=3)  # [N, T, E//2]
        self.bn1 = BatchNorm1d(num_features=hp.E // 2)
        self.bn2 = BatchNorm1d(num_features=hp.E // 2)

        self.highways = nn.ModuleList()
        for i in range(hp.num_highways):
            self.highways.append(Highway(in_features=hp.E // 2, out_features=hp.E // 2))

        self.gru = nn.GRU(input_size=hp.E // 2, hidden_size=hp.E // 2, num_layers=2, bidirectional=True, batch_first=True) 
Example #21
Source File: albunet.py    From neural-pipeline with MIT License
def __init__(self, base_model: torch.nn.Module, num_classes: int, weights_url: str = None):
        super().__init__()
        if not hasattr(self, 'decoder_block'):
            self.decoder_block = UnetDecoderBlock
        if not hasattr(self, 'bottleneck_type'):
            self.bottleneck_type = ConvBottleneck

        if weights_url is not None:
            print("Model weights inited by url")

            pretrained_weights = model_zoo.load_url(weights_url)
            model_state_dict = base_model.state_dict()
            pretrained_weights = {k: v for k, v in pretrained_weights.items() if k in model_state_dict}
            base_model.load_state_dict(pretrained_weights)

        filters = [64, 64, 128, 256, 512]

        self.bottlenecks = nn.ModuleList([self.bottleneck_type(f * 2, f) for f in reversed(filters[:-1])])
        self.decoder_stages = nn.ModuleList([self.get_decoder(filters, idx) for idx in range(1, len(filters))])

        self.encoder_stages = nn.ModuleList([self.get_encoder(base_model, idx) for idx in range(len(filters))])

        self.last_upsample = self.decoder_block(filters[0], filters[0])
        self.final = self.make_final_classifier(filters[0], num_classes) 
Example #22
Source File: base_roi_extractor.py    From mmdetection with Apache License 2.0
def build_roi_layers(self, layer_cfg, featmap_strides):
        """Build RoI operator to extract feature from each level feature map.

        Args:
            layer_cfg (dict): Dictionary to construct and config RoI layer
                operation. Options are modules under ``mmdet/ops`` such as
                ``RoIAlign``.
            featmap_strides (List[int]): The stride of each input feature map
                w.r.t. the original image size; used to scale RoI coordinates
                (original image coordinate system) to the feature coordinate
                system.

        Returns:
            nn.ModuleList: The RoI extractor modules for each level feature
                map.
        """

        cfg = layer_cfg.copy()
        layer_type = cfg.pop('type')
        assert hasattr(ops, layer_type)
        layer_cls = getattr(ops, layer_type)
        roi_layers = nn.ModuleList(
            [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
        return roi_layers 
Example #23
Source File: test_config.py    From mmdetection with Apache License 2.0
def _check_bbox_head(bbox_cfg, bbox_head):
    import torch.nn as nn
    if isinstance(bbox_cfg, list):
        for single_bbox_cfg, single_bbox_head in zip(bbox_cfg, bbox_head):
            _check_bbox_head(single_bbox_cfg, single_bbox_head)
    elif isinstance(bbox_head, nn.ModuleList):
        for single_bbox_head in bbox_head:
            _check_bbox_head(bbox_cfg, single_bbox_head)
    else:
        assert bbox_cfg['type'] == bbox_head.__class__.__name__
        assert bbox_cfg.in_channels == bbox_head.in_channels
        with_cls = bbox_cfg.get('with_cls', True)
        if with_cls:
            fc_out_channels = bbox_cfg.get('fc_out_channels', 2048)
            assert (fc_out_channels == bbox_head.fc_cls.in_features)
            assert bbox_cfg.num_classes + 1 == bbox_head.fc_cls.out_features

        with_reg = bbox_cfg.get('with_reg', True)
        if with_reg:
            out_dim = (4 if bbox_cfg.reg_class_agnostic else 4 *
                       bbox_cfg.num_classes)
            assert bbox_head.fc_reg.out_features == out_dim 
Example #24
Source File: test_config.py    From mmdetection with Apache License 2.0
def _check_mask_head(mask_cfg, mask_head):
    import torch.nn as nn
    if isinstance(mask_cfg, list):
        for single_mask_cfg, single_mask_head in zip(mask_cfg, mask_head):
            _check_mask_head(single_mask_cfg, single_mask_head)
    elif isinstance(mask_head, nn.ModuleList):
        for single_mask_head in mask_head:
            _check_mask_head(mask_cfg, single_mask_head)
    else:
        assert mask_cfg['type'] == mask_head.__class__.__name__
        assert mask_cfg.in_channels == mask_head.in_channels
        class_agnostic = mask_cfg.get('class_agnostic', False)
        out_dim = (1 if class_agnostic else mask_cfg.num_classes)
        if hasattr(mask_head, 'conv_logits'):
            assert (mask_cfg.conv_out_channels ==
                    mask_head.conv_logits.in_channels)
            assert mask_head.conv_logits.out_channels == out_dim
        else:
            assert mask_cfg.fc_out_channels == mask_head.fc_logits.in_features
            assert (mask_head.fc_logits.out_features == out_dim *
                    mask_head.output_area) 
Example #25
Source File: UpdateFunction.py    From nmp_qc with MIT License
def init_mpnn(self, params):
        learn_args = []
        learn_modules = []
        args = {}

        args['in_m'] = params['in_m']
        args['out'] = params['out']

        # GRU
        learn_modules.append(nn.GRU(params['in_m'], params['out']))

        return nn.ParameterList(learn_args), nn.ModuleList(learn_modules), args 
Example #26
Source File: MessageFunction.py    From nmp_qc with MIT License
def init_intnet(self, params):
        learn_args = []
        learn_modules = []
        args = {}
        args['in'] = params['in']
        args['out'] = params['out']
        learn_modules.append(NNet(n_in=params['in'], n_out=params['out']))
        return nn.ParameterList(learn_args), nn.ModuleList(learn_modules), args

    # Gilmer et al. (2017), Neural Message Passing for Quantum Chemistry 
Example #27
Source File: UpdateFunction.py    From nmp_qc with MIT License
def init_intnet(self, params):
        learn_args = []
        learn_modules = []
        args = {}

        args['in'] = params['in']
        args['out'] = params['out']

        learn_modules.append(NNet(n_in=params['in'], n_out=params['out']))

        return nn.ParameterList(learn_args), nn.ModuleList(learn_modules), args 
Example #28
Source File: Network.py    From GST-Tacotron with MIT License
def __init__(self):
        super().__init__()

        self.conv1d_bank = Conv1dBank(K=hp.decoder_K, in_channels=hp.n_mels, out_channels=hp.E // 2)

        self.conv1d_1 = Conv1d(in_channels=hp.decoder_K * hp.E // 2, out_channels=hp.E, kernel_size=3)
        self.bn1 = BatchNorm1d(hp.E)
        self.conv1d_2 = Conv1d(in_channels=hp.E, out_channels=hp.n_mels, kernel_size=3)
        self.bn2 = BatchNorm1d(hp.n_mels)

        self.highways = nn.ModuleList()
        for i in range(hp.num_highways):
            self.highways.append(Highway(in_features=hp.n_mels, out_features=hp.n_mels))

        self.gru = nn.GRU(input_size=hp.n_mels, hidden_size=hp.E // 2, num_layers=2, bidirectional=True, batch_first=True) 
Example #29
Source File: MessageFunction.py    From nmp_qc with MIT License
def init_mpnn(self, params):
        learn_args = []
        learn_modules = []
        args = {}

        args['in'] = params['in']
        args['out'] = params['out']

        # Define a parameter matrix A for each edge label.
        learn_modules.append(NNet(n_in=params['edge_feat'], n_out=(params['in']*params['out'])))

        return nn.ParameterList(learn_args), nn.ModuleList(learn_modules), args

    # Kearnes et al. (2016), Molecular Graph Convolutions 
Example #30
Source File: MessageFunction.py    From nmp_qc with MIT License
def init_duvenaud(self, params):
        learn_args = []
        learn_modules = []
        args = {}
        return nn.ParameterList(learn_args), nn.ModuleList(learn_modules), args

    # Li et al. (2016), Gated Graph Neural Networks (GG-NN)