Python torch.nn.MaxPool3d() Examples
The following are 30 code examples of torch.nn.MaxPool3d(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn, or try the search function.
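Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of how nn.MaxPool3d is typically applied to a 5D tensor of shape (N, C, D, H, W); the tensor sizes are illustrative assumptions.

import torch
import torch.nn as nn

# A 3D max-pooling layer with a 2x2x2 window and stride 2 halves the
# depth, height, and width of the input.
pool = nn.MaxPool3d(kernel_size=2, stride=2)

# Illustrative input: a batch of 4 single-channel volumes of size 16x32x32.
x = torch.randn(4, 1, 16, 32, 32)
y = pool(x)
print(y.shape)  # torch.Size([4, 1, 8, 16, 16])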
Example #1
Source File: model.py From TASED-Net with MIT License | 6 votes |
def __init__(self):
    super(Mixed_4d, self).__init__()
    self.branch0 = nn.Sequential(
        BasicConv3d(512, 128, kernel_size=1, stride=1),
    )
    self.branch1 = nn.Sequential(
        BasicConv3d(512, 128, kernel_size=1, stride=1),
        SepConv3d(128, 256, kernel_size=3, stride=1, padding=1),
    )
    self.branch2 = nn.Sequential(
        BasicConv3d(512, 24, kernel_size=1, stride=1),
        SepConv3d(24, 64, kernel_size=3, stride=1, padding=1),
    )
    self.branch3 = nn.Sequential(
        nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
        BasicConv3d(512, 64, kernel_size=1, stride=1),
    )
Example #2
Source File: I3D.py From action-recognition-models-pytorch with MIT License | 6 votes |
def __init__(self, num_class):
    super(I3D, self).__init__()
    self.conv1 = BasicConv3d(3, 64, kernel_size=7, stride=2, padding=3)
    self.pool1 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.conv2 = BasicConv3d(64, 64, kernel_size=1, stride=1)
    self.conv3 = BasicConv3d(64, 192, kernel_size=3, stride=1, padding=1)
    self.pool2 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.Inception1 = nn.Sequential(
        Inception_block(192, [64, 96, 128, 16, 32, 32]),
        Inception_block(256, [128, 128, 192, 32, 96, 64]),
    )
    self.pool3 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1))
    self.Inception2 = nn.Sequential(
        Inception_block(480, [192, 96, 208, 16, 48, 64]),
        Inception_block(512, [160, 112, 224, 24, 64, 64]),
        Inception_block(512, [128, 128, 256, 24, 64, 64]),
        Inception_block(512, [112, 144, 288, 32, 64, 64]),
        Inception_block(528, [256, 160, 320, 32, 128, 128]),
    )
    self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=2)
    self.Inception3 = nn.Sequential(
        Inception_block(832, [256, 160, 320, 32, 128, 128]),
        Inception_block(832, [384, 192, 384, 48, 128, 128]),
    )
    self.avg_pool = nn.AvgPool3d(kernel_size=(8, 7, 7))
    self.dropout = nn.Dropout(0.4)
    self.linear = nn.Linear(1024, num_class)
Example #3
Source File: I3D.py From action-recognition-models-pytorch with MIT License | 6 votes |
def __init__(self, in_channel, out_channel):
    super(Inception_block, self).__init__()
    # out_channel = [1x1x1, 3x3x3_reduce, 3x3x3, 3x3x3_reduce, 3x3x3, pooling_reduce]
    self.branch1 = BasicConv3d(in_channel, out_channel[0], kernel_size=1, stride=1)
    self.branch2 = nn.Sequential(
        BasicConv3d(in_channel, out_channel[1], kernel_size=1, stride=1),
        BasicConv3d(out_channel[1], out_channel[2], kernel_size=3, stride=1, padding=1),
    )
    self.branch3 = nn.Sequential(
        BasicConv3d(in_channel, out_channel[3], kernel_size=1, stride=1),
        BasicConv3d(out_channel[3], out_channel[4], kernel_size=3, stride=1, padding=1),
    )
    self.branch4 = nn.Sequential(
        nn.MaxPool3d(kernel_size=3, stride=1, padding=1),
        BasicConv3d(in_channel, out_channel[5], kernel_size=1, stride=1),
    )
Example #4
Source File: nonlocalnet.py From pretorched-x with MIT License | 6 votes |
def __init__(self, block, layers, nonlocal_layers, shortcut_type='A', num_classes=339):
    self.inplanes = 64
    super().__init__()
    self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
    self.bn1 = nn.BatchNorm3d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0], nonlocal_layers[0], shortcut_type)
    self.layer2 = self._make_layer(block, 128, layers[1], nonlocal_layers[1], shortcut_type, stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], nonlocal_layers[2], shortcut_type, stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], nonlocal_layers[3], shortcut_type, stride=2)
    self.avgpool = nn.AdaptiveAvgPool3d(1)
    self.last_linear = nn.Linear(512 * block.expansion, num_classes)
    self.init_weights()
Example #5
Source File: wideresnet3D.py From pretorched-x with MIT License | 6 votes |
def __init__(self, block, layers, k=1, shortcut_type='B', num_classes=400):
    self.inplanes = 64
    super(WideResNet, self).__init__()
    self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
    self.bn1 = nn.BatchNorm3d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64 * k, layers[0], shortcut_type)
    self.layer2 = self._make_layer(block, 128 * k, layers[1], shortcut_type, stride=2)
    self.layer3 = self._make_layer(block, 256 * k, layers[2], shortcut_type, stride=2)
    self.layer4 = self._make_layer(block, 512 * k, layers[3], shortcut_type, stride=2)
    self.avgpool = nn.AdaptiveAvgPool3d(1)
    self.fc = nn.Linear(512 * k * block.expansion, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv3d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out')
        elif isinstance(m, nn.BatchNorm3d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example #6
Source File: resnet_2d3d.py From DPC with MIT License | 6 votes |
def __init__(self, block, layers, track_running_stats=True):
    super(ResNet2d3d_full, self).__init__()
    self.inplanes = 64
    self.track_running_stats = track_running_stats
    bias = False
    self.conv1 = nn.Conv3d(3, 64, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=bias)
    self.bn1 = nn.BatchNorm3d(64, track_running_stats=track_running_stats)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    if not isinstance(block, list):
        block = [block] * 4
    self.layer1 = self._make_layer(block[0], 64, layers[0])
    self.layer2 = self._make_layer(block[1], 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block[2], 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block[3], 256, layers[3], stride=2, is_final=True)  # modify layer4 from exp=512 to exp=256
    for m in self.modules():
        if isinstance(m, nn.Conv3d):
            m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm3d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example #7
Source File: region_proposal_network.py From SlowFast-Network-pytorch with MIT License | 6 votes |
def __init__(self, num_features_out: int, anchor_ratios: List[Tuple[int, int]], anchor_sizes: List[int],
             pre_nms_top_n: int, post_nms_top_n: int, anchor_smooth_l1_loss_beta: float):
    super().__init__()
    self.maxpool3d = nn.MaxPool3d(kernel_size=(64, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0))
    self._features = nn.Sequential(
        nn.Conv2d(in_channels=num_features_out, out_channels=512, kernel_size=3, padding=1),
        nn.ReLU()
    )
    self._anchor_ratios = anchor_ratios
    self._anchor_sizes = anchor_sizes

    num_anchor_ratios = len(self._anchor_ratios)
    num_anchor_sizes = len(self._anchor_sizes)
    num_anchors = num_anchor_ratios * num_anchor_sizes

    self._pre_nms_top_n = pre_nms_top_n
    self._post_nms_top_n = post_nms_top_n
    self._anchor_smooth_l1_loss_beta = anchor_smooth_l1_loss_beta

    self._anchor_objectness = nn.Conv2d(in_channels=512, out_channels=num_anchors * 2, kernel_size=1)
    self._anchor_transformer = nn.Conv2d(in_channels=512, out_channels=num_anchors * 4, kernel_size=1)
Example #8
Source File: Fast_S3D.py From action-recognition-models-pytorch with MIT License | 6 votes |
def __init__(self, num_class):
    super(fast_S3D, self).__init__()
    self.conv1 = BasicConv3d(3, 64, kernel_size=(1, 7, 7), stride=2, padding=(0, 3, 3))
    self.pool1 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.conv2 = BasicConv3d(64, 64, kernel_size=1, stride=1)
    self.conv3 = BasicConv3d(64, 192, kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1))
    self.pool2 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.Inception1 = nn.Sequential(
        Inception_block(192, [64, 96, 128, 16, 32, 32]),
        Inception_block(256, [128, 128, 192, 32, 96, 64]),
    )
    self.pool3 = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
    self.Inception2 = nn.Sequential(
        Inception_block(480, [192, 96, 208, 16, 48, 64]),
        Inception_block(512, [160, 112, 224, 24, 64, 64]),
        Inception_block(512, [128, 128, 256, 24, 64, 64]),
        Inception_block(512, [112, 144, 288, 32, 64, 64]),
        Inception_block(528, [256, 160, 320, 32, 128, 128]),
    )
    self.pool4 = nn.MaxPool3d(kernel_size=2, stride=2)
    self.Inception3 = nn.Sequential(
        S3D_block(832, [256, 160, 320, 32, 128, 128]),
        S3D_block(832, [384, 192, 384, 48, 128, 128]),
    )
    self.avg_pool = nn.AvgPool3d(kernel_size=(8, 7, 7))
    self.dropout = nn.Dropout(0.4)
    self.linear = nn.Linear(1024, num_class)
Example #9
Source File: Fast_S3D.py From action-recognition-models-pytorch with MIT License | 6 votes |
def __init__(self, in_channel, out_channel):
    super(S3D_block, self).__init__()
    # out_channel = [1x1x1, 3x3x3_reduce, 3x3x3, 3x3x3_reduce, 3x3x3, pooling_reduce]
    self.branch1 = BasicConv3d(in_channel, out_channel[0], kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0))
    self.branch2 = nn.Sequential(
        BasicConv3d(in_channel, out_channel[1], kernel_size=1, stride=1),
        BasicConv3d(out_channel[1], out_channel[1], kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1)),
        BasicConv3d(out_channel[1], out_channel[2], kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0)),
    )
    self.branch3 = nn.Sequential(
        BasicConv3d(in_channel, out_channel[3], kernel_size=1, stride=1),
        BasicConv3d(out_channel[3], out_channel[3], kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1)),
        BasicConv3d(out_channel[3], out_channel[4], kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0)),
    )
    self.branch4 = nn.Sequential(
        nn.MaxPool3d(kernel_size=3, stride=1, padding=1),
        BasicConv3d(in_channel, out_channel[5], kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0)),
    )
Example #10
Source File: Fast_S3D.py From action-recognition-models-pytorch with MIT License | 6 votes |
def __init__(self, in_channel, out_channel):
    super(Inception_block, self).__init__()
    # out_channel = [1x1x1, 3x3x3_reduce, 3x3x3, 3x3x3_reduce, 3x3x3, pooling_reduce]
    self.branch1 = BasicConv3d(in_channel, out_channel[0], kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0))
    self.branch2 = nn.Sequential(
        BasicConv3d(in_channel, out_channel[1], kernel_size=1, stride=1),
        BasicConv3d(out_channel[1], out_channel[2], kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1)),
    )
    self.branch3 = nn.Sequential(
        BasicConv3d(in_channel, out_channel[3], kernel_size=1, stride=1),
        BasicConv3d(out_channel[3], out_channel[4], kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1)),
    )
    self.branch4 = nn.Sequential(
        nn.MaxPool3d(kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1)),
        BasicConv3d(in_channel, out_channel[5], kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0)),
    )
Example #11
Source File: S3D_G.py From action-recognition-models-pytorch with MIT License | 6 votes |
def __init__(self, num_class):
    super(S3D_G, self).__init__()
    self.conv1 = BasicConv3d(3, 64, kernel_size=7, stride=2, padding=3)
    self.pool1 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.conv2 = BasicConv3d(64, 64, kernel_size=1, stride=1)
    self.conv3 = BasicConv3d(64, 192, kernel_size=3, stride=1, padding=1)
    self.pool2 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.Inception1 = nn.Sequential(
        S3D_G_block(192, [64, 96, 128, 16, 32, 32]),
        S3D_G_block(256, [128, 128, 192, 32, 96, 64]),
    )
    self.pool3 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1))
    self.Inception2 = nn.Sequential(
        S3D_G_block(480, [192, 96, 208, 16, 48, 64]),
        S3D_G_block(512, [160, 112, 224, 24, 64, 64]),
        S3D_G_block(512, [128, 128, 256, 24, 64, 64]),
        S3D_G_block(512, [112, 144, 288, 32, 64, 64]),
        S3D_G_block(528, [256, 160, 320, 32, 128, 128]),
    )
    self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=2)
    self.Inception3 = nn.Sequential(
        S3D_G_block(832, [256, 160, 320, 32, 128, 128]),
        S3D_G_block(832, [384, 192, 384, 48, 128, 128]),
    )
    self.avg_pool = nn.AvgPool3d(kernel_size=(8, 7, 7))
    self.dropout = nn.Dropout(0.4)
    self.linear = nn.Linear(1024, num_class)
Example #12
Source File: inflate.py From TKP with Apache License 2.0 | 6 votes |
def inflate_pool(pool2d, time_dim=1, time_padding=0, time_stride=None, time_dilation=1):
    kernel_dim = (time_dim, pool2d.kernel_size, pool2d.kernel_size)
    padding = (time_padding, pool2d.padding, pool2d.padding)
    if time_stride is None:
        time_stride = time_dim
    stride = (time_stride, pool2d.stride, pool2d.stride)
    if isinstance(pool2d, nn.MaxPool2d):
        dilation = (time_dilation, pool2d.dilation, pool2d.dilation)
        pool3d = nn.MaxPool3d(
            kernel_dim,
            padding=padding,
            dilation=dilation,
            stride=stride,
            ceil_mode=pool2d.ceil_mode)
    elif isinstance(pool2d, nn.AvgPool2d):
        pool3d = nn.AvgPool3d(kernel_dim, stride=stride)
    else:
        raise ValueError(
            '{} is not among known pooling classes'.format(type(pool2d)))
    return pool3d
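A brief usage sketch for the helper above; the 2D layer and its parameters are illustrative assumptions, not taken from the TKP project.

# Hypothetical usage: inflate a 2D max-pooling layer into its 3D counterpart.
pool2d = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
pool3d = inflate_pool(pool2d, time_dim=3, time_padding=1, time_stride=2)
# pool3d is an nn.MaxPool3d with kernel_size=(3, 3, 3), stride=(2, 2, 2) and
# padding=(1, 1, 1), matching the 2D layer in the spatial dimensions.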
Example #13
Source File: S3D_G.py From action-recognition-models-pytorch with MIT License | 6 votes |
def __init__(self, in_channel, out_channel):
    super(S3D_G_block, self).__init__()
    # out_channel = [1x1x1, 3x3x3_reduce, 3x3x3, 3x3x3_reduce, 3x3x3, pooling_reduce]
    self.branch1 = BasicConv3d(in_channel, out_channel[0], kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0))
    self.branch2 = nn.Sequential(
        BasicConv3d(in_channel, out_channel[1], kernel_size=1, stride=1),
        BasicConv3d(out_channel[1], out_channel[1], kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1)),
        BasicConv3d(out_channel[1], out_channel[2], kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0)),
    )
    self.branch3 = nn.Sequential(
        BasicConv3d(in_channel, out_channel[3], kernel_size=1, stride=1),
        BasicConv3d(out_channel[3], out_channel[3], kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1)),
        BasicConv3d(out_channel[3], out_channel[4], kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0)),
    )
    self.branch4 = nn.Sequential(
        nn.MaxPool3d(kernel_size=3, stride=1, padding=1),
        BasicConv3d(in_channel, out_channel[5], kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0)),
    )
    self.squeeze = nn.AdaptiveAvgPool3d(1)
    # we replace the weight matrix with a 1D conv to reduce the parameter count
    self.excitation = nn.Conv1d(1, 1, (3, 1, 1), stride=1, padding=(1, 0, 0))
    self.sigmoid = nn.Sigmoid()
Example #14
Source File: model.py From TASED-Net with MIT License | 6 votes |
def __init__(self):
    super(Mixed_5c, self).__init__()
    self.branch0 = nn.Sequential(
        BasicConv3d(832, 384, kernel_size=1, stride=1),
    )
    self.branch1 = nn.Sequential(
        BasicConv3d(832, 192, kernel_size=1, stride=1),
        SepConv3d(192, 384, kernel_size=3, stride=1, padding=1),
    )
    self.branch2 = nn.Sequential(
        BasicConv3d(832, 48, kernel_size=1, stride=1),
        SepConv3d(48, 128, kernel_size=3, stride=1, padding=1),
    )
    self.branch3 = nn.Sequential(
        nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
        BasicConv3d(832, 128, kernel_size=1, stride=1),
    )
Example #15
Source File: blocks.py From pytorch-UNet with MIT License | 6 votes |
def __init__(self, in_channels, middle_channels, out_channels, dropout=False, downsample_kernel=2):
    super(Encoder3D, self).__init__()

    layers = [
        nn.MaxPool3d(kernel_size=downsample_kernel),
        nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1),
        nn.BatchNorm3d(middle_channels),
        nn.ReLU(inplace=True),
        nn.Conv3d(middle_channels, out_channels, kernel_size=3, padding=1),
        nn.BatchNorm3d(out_channels),
        nn.ReLU(inplace=True)
    ]

    if dropout:
        assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
        layers.append(nn.Dropout3d(p=dropout))

    self.encoder = nn.Sequential(*layers)
Example #16
Source File: blocks.py From pytorch-UNet with MIT License | 6 votes |
def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):
    super(Center3D, self).__init__()

    layers = [
        nn.MaxPool3d(kernel_size=2),
        nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1),
        nn.BatchNorm3d(middle_channels),
        nn.ReLU(inplace=True),
        nn.Conv3d(middle_channels, out_channels, kernel_size=3, padding=1),
        nn.BatchNorm3d(out_channels),
        nn.ReLU(inplace=True),
        nn.ConvTranspose3d(out_channels, deconv_channels, kernel_size=2, stride=2)
    ]

    if dropout:
        assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
        layers.append(nn.Dropout3d(p=dropout))

    self.center = nn.Sequential(*layers)
Example #17
Source File: model.py From TASED-Net with MIT License | 6 votes |
def __init__(self):
    super(Mixed_5b, self).__init__()
    self.branch0 = nn.Sequential(
        BasicConv3d(832, 256, kernel_size=1, stride=1),
    )
    self.branch1 = nn.Sequential(
        BasicConv3d(832, 160, kernel_size=1, stride=1),
        SepConv3d(160, 320, kernel_size=3, stride=1, padding=1),
    )
    self.branch2 = nn.Sequential(
        BasicConv3d(832, 32, kernel_size=1, stride=1),
        SepConv3d(32, 128, kernel_size=3, stride=1, padding=1),
    )
    self.branch3 = nn.Sequential(
        nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
        BasicConv3d(832, 128, kernel_size=1, stride=1),
    )
Example #18
Source File: model.py From TASED-Net with MIT License | 6 votes |
def __init__(self):
    super(Mixed_4f, self).__init__()
    self.branch0 = nn.Sequential(
        BasicConv3d(528, 256, kernel_size=1, stride=1),
    )
    self.branch1 = nn.Sequential(
        BasicConv3d(528, 160, kernel_size=1, stride=1),
        SepConv3d(160, 320, kernel_size=3, stride=1, padding=1),
    )
    self.branch2 = nn.Sequential(
        BasicConv3d(528, 32, kernel_size=1, stride=1),
        SepConv3d(32, 128, kernel_size=3, stride=1, padding=1),
    )
    self.branch3 = nn.Sequential(
        nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
        BasicConv3d(528, 128, kernel_size=1, stride=1),
    )
Example #19
Source File: unet3d.py From space_time_pde with MIT License | 6 votes |
def _create_layers(self):
    # number of features in the downward path
    nfeat_down_out = [self.nf * (2 ** (i + 1)) for i in range(self.li)]
    # cap the maximum number of feature layers
    nfeat_down_out = [n if n <= self.mf else self.mf for n in nfeat_down_out]
    nfeat_down_in = [self.nf] + nfeat_down_out[:-1]

    self.conv_in = ResBlock3D(self.in_features, self.nf, self.nf)
    self.conv_out = nn.Conv3d(nfeat_down_out[-1], self.out_features, kernel_size=1, stride=1)

    self.down_modules = [ResBlock3D(n_in, int(n / 2), n)
                         for n_in, n in zip(nfeat_down_in, nfeat_down_out)]
    self.down_pools = []
    prev_layer_dims = np.array(self.igres)
    for _ in range(len(nfeat_down_out)):
        pool_kernel_size, next_layer_dims = self._get_pool_kernel_size(prev_layer_dims)
        pool_layer = nn.MaxPool3d(pool_kernel_size)
        self.down_pools.append(pool_layer)
        prev_layer_dims = next_layer_dims

    self.down_modules = nn.ModuleList(self.down_modules)
    self.down_pools = nn.ModuleList(self.down_pools)
Example #20
Source File: pooling.py From fastNLP with Apache License 2.0 | 6 votes |
def forward(self, x):
    if self.dimension == 1:
        x = torch.transpose(x, 1, 2)  # [N,L,C] -> [N,C,L]
        pooling = nn.MaxPool1d(
            stride=self.stride, padding=self.padding, dilation=self.dilation,
            kernel_size=self.kernel_size if self.kernel_size is not None else x.size(-1),
            return_indices=False, ceil_mode=self.ceil_mode
        )
    elif self.dimension == 2:
        pooling = nn.MaxPool2d(
            stride=self.stride, padding=self.padding, dilation=self.dilation,
            kernel_size=self.kernel_size if self.kernel_size is not None else (x.size(-2), x.size(-1)),
            return_indices=False, ceil_mode=self.ceil_mode
        )
    else:
        pooling = nn.MaxPool3d(
            stride=self.stride, padding=self.padding, dilation=self.dilation,
            kernel_size=self.kernel_size if self.kernel_size is not None else (x.size(-3), x.size(-2), x.size(-1)),
            return_indices=False, ceil_mode=self.ceil_mode
        )
    x = pooling(x)
    return x.squeeze(dim=-1)  # [N,C,1] -> [N,C]
Example #21
Source File: resnext3d_stem.py From ClassyVision with MIT License | 6 votes |
def _construct_stem(self, dim_in, dim_out):
    assert (
        self.stride[1] == self.stride[2]
    ), "Only support identical height stride and width stride"

    self.conv = r2plus1_unit(
        dim_in,
        dim_out,
        self.stride[0],  # temporal_stride
        self.stride[1],  # spatial_stride
        1,  # groups
        self.inplace_relu,
        self.bn_eps,
        self.bn_mmt,
        dim_mid=45,  # hard-coded middle channels
    )
    self.bn = nn.BatchNorm3d(dim_out, eps=self.bn_eps, momentum=self.bn_mmt)
    self.relu = nn.ReLU(self.inplace_relu)
    if self.maxpool:
        self.pool_layer = nn.MaxPool3d(
            kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]
        )
Example #22
Source File: model.py From TASED-Net with MIT License | 6 votes |
def __init__(self):
    super(Mixed_3b, self).__init__()
    self.branch0 = nn.Sequential(
        BasicConv3d(192, 64, kernel_size=1, stride=1),
    )
    self.branch1 = nn.Sequential(
        BasicConv3d(192, 96, kernel_size=1, stride=1),
        SepConv3d(96, 128, kernel_size=3, stride=1, padding=1),
    )
    self.branch2 = nn.Sequential(
        BasicConv3d(192, 16, kernel_size=1, stride=1),
        SepConv3d(16, 32, kernel_size=3, stride=1, padding=1),
    )
    self.branch3 = nn.Sequential(
        nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
        BasicConv3d(192, 32, kernel_size=1, stride=1),
    )
Example #23
Source File: model.py From TASED-Net with MIT License | 6 votes |
def __init__(self):
    super(Mixed_3c, self).__init__()
    self.branch0 = nn.Sequential(
        BasicConv3d(256, 128, kernel_size=1, stride=1),
    )
    self.branch1 = nn.Sequential(
        BasicConv3d(256, 128, kernel_size=1, stride=1),
        SepConv3d(128, 192, kernel_size=3, stride=1, padding=1),
    )
    self.branch2 = nn.Sequential(
        BasicConv3d(256, 32, kernel_size=1, stride=1),
        SepConv3d(32, 96, kernel_size=3, stride=1, padding=1),
    )
    self.branch3 = nn.Sequential(
        nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
        BasicConv3d(256, 64, kernel_size=1, stride=1),
    )
Example #24
Source File: model.py From TASED-Net with MIT License | 6 votes |
def __init__(self):
    super(Mixed_4b, self).__init__()
    self.branch0 = nn.Sequential(
        BasicConv3d(480, 192, kernel_size=1, stride=1),
    )
    self.branch1 = nn.Sequential(
        BasicConv3d(480, 96, kernel_size=1, stride=1),
        SepConv3d(96, 208, kernel_size=3, stride=1, padding=1),
    )
    self.branch2 = nn.Sequential(
        BasicConv3d(480, 16, kernel_size=1, stride=1),
        SepConv3d(16, 48, kernel_size=3, stride=1, padding=1),
    )
    self.branch3 = nn.Sequential(
        nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
        BasicConv3d(480, 64, kernel_size=1, stride=1),
    )
Example #25
Source File: model.py From TASED-Net with MIT License | 6 votes |
def __init__(self):
    super(Mixed_4c, self).__init__()
    self.branch0 = nn.Sequential(
        BasicConv3d(512, 160, kernel_size=1, stride=1),
    )
    self.branch1 = nn.Sequential(
        BasicConv3d(512, 112, kernel_size=1, stride=1),
        SepConv3d(112, 224, kernel_size=3, stride=1, padding=1),
    )
    self.branch2 = nn.Sequential(
        BasicConv3d(512, 24, kernel_size=1, stride=1),
        SepConv3d(24, 64, kernel_size=3, stride=1, padding=1),
    )
    self.branch3 = nn.Sequential(
        nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
        BasicConv3d(512, 64, kernel_size=1, stride=1),
    )
Example #26
Source File: resnext3D.py From pretorched-x with MIT License | 6 votes |
def __init__(self, block, layers, shortcut_type='B', cardinality=32, num_classes=400):
    self.inplanes = 64
    super(ResNeXt3D, self).__init__()
    self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
    self.bn1 = nn.BatchNorm3d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
    self.layer1 = self._make_layer(block, 128, layers[0], shortcut_type, cardinality)
    self.layer2 = self._make_layer(block, 256, layers[1], shortcut_type, cardinality, stride=2)
    self.layer3 = self._make_layer(block, 512, layers[2], shortcut_type, cardinality, stride=2)
    self.layer4 = self._make_layer(block, 1024, layers[3], shortcut_type, cardinality, stride=2)
    self.avgpool = nn.AdaptiveAvgPool3d(1)
    self.fc = nn.Linear(cardinality * 32 * block.expansion, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv3d):
            m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
        elif isinstance(m, nn.BatchNorm3d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example #27
Source File: resnet3D.py From pretorched-x with MIT License | 5 votes |
def __init__(self, block, layers, shortcut_type='B', num_classes=339):
    self.inplanes = 64
    super(ResNet3D, self).__init__()
    self.conv1 = self.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
    self.bn1 = nn.BatchNorm3d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
    self.layer2 = self._make_layer(block, 128, layers[1], shortcut_type, stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], shortcut_type, stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], shortcut_type, stride=2)
    self.avgpool = nn.AdaptiveAvgPool3d(1)
    self.fc = nn.Linear(512 * block.expansion, num_classes)
    self.init_weights()
Example #28
Source File: resnext3d_stem.py From ClassyVision with MIT License | 5 votes |
def _construct_stem(self, dim_in, dim_out):
    self.conv = nn.Conv3d(
        dim_in,
        dim_out,
        self.kernel,
        stride=self.stride,
        padding=self.padding,
        bias=False,
    )
    self.bn = nn.BatchNorm3d(dim_out, eps=self.bn_eps, momentum=self.bn_mmt)
    self.relu = nn.ReLU(self.inplace_relu)
    if self.maxpool:
        self.pool_layer = nn.MaxPool3d(
            kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]
        )
Example #29
Source File: slowfast.py From pretorched-x with MIT License | 5 votes |
def _make_layers(self, block, layers):
    self.conv1 = nn.Conv3d(3, 64, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False)
    self.bn1 = nn.BatchNorm3d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.res2 = self._make_layer_slow(block, 64, layers[0], head_conv=1)
    # TODO: Verify that this adjustment is correct.
    res3_stride = 2 if issubclass(block, Bottleneck) else 1
    self.res3 = self._make_layer_slow(block, 128, layers[1], stride=res3_stride, head_conv=1)
    self.res4 = self._make_layer_slow(block, 256, layers[2], stride=2, head_conv=3)
    self.res5 = self._make_layer_slow(block, 512, layers[3], stride=2, head_conv=3)
Example #30
Source File: factories.py From MONAI with Apache License 2.0 | 5 votes |
def maxpooling_factory(dim):
    types = [nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]
    return types[dim - 1]
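A brief usage sketch of the factory above, assuming the caller passes the spatial dimension (1, 2, or 3).

# Hypothetical usage: pick the pooling class for 3D data and instantiate it.
pool_cls = maxpooling_factory(3)  # returns nn.MaxPool3d
pool = pool_cls(kernel_size=2, stride=2)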