Python torch.nn.MaxPool2d() Examples
The following are 30 code examples of torch.nn.MaxPool2d(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn, or try the search function.
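For reference, nn.MaxPool2d slides a window over each channel independently and keeps the maximum value in each window. Below is a minimal sketch (not taken from any of the projects listed here) of the common kernel_size=2, stride=2 configuration, which halves the spatial dimensions:

import torch
import torch.nn as nn

pool = nn.MaxPool2d(kernel_size=2, stride=2)  # stride defaults to kernel_size when omitted
x = torch.randn(1, 3, 32, 32)                 # (batch, channels, height, width)
y = pool(x)
print(y.shape)                                # torch.Size([1, 3, 16, 16])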
Example #1
Source File: model_architecture.py From models with MIT License | 8 votes |
def get_model(load_weights=True):
    deepsea_cpu = nn.Sequential(  # Sequential,
        nn.Conv2d(4, 320, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4), (1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(320, 480, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4), (1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(480, 960, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.Dropout(0.5),
        Lambda(lambda x: x.view(x.size(0), -1)),  # Reshape,
        nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(50880, 925)),  # Linear,
        nn.Threshold(0, 1e-06),
        nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(925, 919)),  # Linear,
        nn.Sigmoid(),
    )
    if load_weights:
        deepsea_cpu.load_state_dict(torch.load('model_files/deepsea_cpu.pth'))
    return nn.Sequential(ReCodeAlphabet(), deepsea_cpu)
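In this model, nn.MaxPool2d((1, 4), (1, 4)) pools only along the last (sequence-length) axis. As a rough shape trace, assuming the canonical DeepSEA input of one-hot 1000-bp sequences shaped (N, 4, 1, 1000), the 50880 features fed to the first nn.Linear fall out as follows:

import torch
import torch.nn as nn

x = torch.randn(1, 4, 1, 1000)              # assumed DeepSEA-style input
x = nn.Conv2d(4, 320, (1, 8), (1, 1))(x)    # -> (1, 320, 1, 993)
x = nn.MaxPool2d((1, 4), (1, 4))(x)         # -> (1, 320, 1, 248)
x = nn.Conv2d(320, 480, (1, 8), (1, 1))(x)  # -> (1, 480, 1, 241)
x = nn.MaxPool2d((1, 4), (1, 4))(x)         # -> (1, 480, 1, 60)
x = nn.Conv2d(480, 960, (1, 8), (1, 1))(x)  # -> (1, 960, 1, 53)
print(x.view(1, -1).shape)                  # torch.Size([1, 50880]) = 960 * 1 * 53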
Example #2
Source File: model.py From cat-bbs with MIT License | 8 votes |
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(MyResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    # note the increasing dilation
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilation=1)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)

    # these layers will not be used
    self.avgpool = nn.AvgPool2d(7)
    self.fc = nn.Linear(512 * block.expansion, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example #3
Source File: resnet_v1.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License | 7 votes |
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    # maxpool different from pytorch-resnet, to match tf-faster-rcnn
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    # use stride 1 for the last conv4 layer (same as tf-faster-rcnn)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=1)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example #4
Source File: Inception_all.py From Pytorch-Networks with MIT License | 6 votes |
def __init__(self, in_dim, conv1, conv3_r, conv3, conv5_r, conv5, pool):
    super(_InceptionV1Block, self).__init__()
    self.conv1_branch = nn.Sequential(
        nn.Conv2d(in_dim, conv1, 1, 1, 0, bias=False),
        nn.ReLU(inplace=True),)
    self.conv3_branch = nn.Sequential(
        nn.Conv2d(in_dim, conv3_r, 1, 1, 0, bias=False),
        nn.ReLU(inplace=True),
        nn.Conv2d(conv3_r, conv3, 3, 1, 1, bias=False),
        nn.ReLU(inplace=True),)
    self.conv5_branch = nn.Sequential(
        nn.Conv2d(in_dim, conv5_r, 1, 1, 0, bias=False),
        nn.ReLU(inplace=True),
        nn.Conv2d(conv5_r, conv5, 5, 1, 2, bias=False),
        nn.ReLU(inplace=True),)
    self.pool_branch = nn.Sequential(
        nn.MaxPool2d(3, 1, 1),
        nn.Conv2d(in_dim, pool, 1, 1, 0, bias=False),
        nn.ReLU(inplace=True),)
Example #5
Source File: vgg.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _make_layers(self, cfg):
    layers = []
    in_channels = 3
    for x in cfg:
        if x == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                       nn.BatchNorm2d(x),
                       nn.ReLU(inplace=True)]
            in_channels = x
    layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
    return nn.Sequential(*layers)

# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size())
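The cfg list mixes integer channel counts with 'M' markers that become 2x2 max-pooling layers. A hypothetical cfg in the usual VGG style (illustrative only; the project's real cfg lists are defined elsewhere in the file):

# Integers become Conv2d(in_channels, x, 3, padding=1) + BatchNorm2d + ReLU; 'M' becomes MaxPool2d(2, 2).
cfg = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
# With 32x32 inputs, the five 'M' entries halve the spatial size five times: 32 -> 16 -> 8 -> 4 -> 2 -> 1.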
Example #6
Source File: convert_Basset_to_pytorch.py From models with MIT License | 6 votes |
def simplify_source(s):
    s = map(lambda x: x.replace(',(1, 1),(0, 0),1,1,bias=True),#Conv2d', ')'), s)
    s = map(lambda x: x.replace(',(0, 0),1,1,bias=True),#Conv2d', ')'), s)
    s = map(lambda x: x.replace(',1,1,bias=True),#Conv2d', ')'), s)
    s = map(lambda x: x.replace(',bias=True),#Conv2d', ')'), s)
    s = map(lambda x: x.replace('),#Conv2d', ')'), s)
    s = map(lambda x: x.replace(',1e-05,0.1,True),#BatchNorm2d', ')'), s)
    s = map(lambda x: x.replace('),#BatchNorm2d', ')'), s)
    s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#MaxPool2d', ')'), s)
    s = map(lambda x: x.replace(',ceil_mode=False),#MaxPool2d', ')'), s)
    s = map(lambda x: x.replace('),#MaxPool2d', ')'), s)
    s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#AvgPool2d', ')'), s)
    s = map(lambda x: x.replace(',ceil_mode=False),#AvgPool2d', ')'), s)
    s = map(lambda x: x.replace(',bias=True)),#Linear', ')), # Linear'), s)
    s = map(lambda x: x.replace(')),#Linear', ')), # Linear'), s)
    s = map(lambda x: '{},\n'.format(x), s)
    s = map(lambda x: x[1:], s)
    s = reduce(lambda x, y: x + y, s)
    return s
Example #7
Source File: model_architecture.py From models with MIT License | 6 votes |
def get_seqpred_model(load_weights=True):
    deepsea_cpu = nn.Sequential(  # Sequential,
        nn.Conv2d(4, 320, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4), (1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(320, 480, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4), (1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(480, 960, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.Dropout(0.5),
        Lambda(lambda x: x.view(x.size(0), -1)),  # Reshape,
        nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(50880, 925)),  # Linear,
        nn.Threshold(0, 1e-06),
        nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(925, 919)),  # Linear,
        nn.Sigmoid(),
    )
    if load_weights:
        deepsea_cpu.load_state_dict(torch.load('model_files/deepsea_cpu.pth'))
    return nn.Sequential(ReCodeAlphabet(), ConcatenateRC(), deepsea_cpu, AverageRC())
Example #8
Source File: vgg.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _make_layers(self, cfg):
    layers = []
    in_channels = 3
    for x in cfg:
        if x == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                       nn.BatchNorm2d(x),
                       nn.ReLU(inplace=True)]
            in_channels = x
    layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
    return nn.Sequential(*layers)

# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size())
Example #9
Source File: googlenet.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __init__(self):
    super(GoogLeNet, self).__init__()
    self.pre_layers = nn.Sequential(
        nn.Conv2d(3, 192, kernel_size=3, padding=1),
        nn.BatchNorm2d(192),
        nn.ReLU(True),
    )

    self.a3 = Inception(192,  64,  96, 128, 16, 32, 32)
    self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

    self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

    self.a4 = Inception(480, 192,  96, 208, 16, 48, 64)
    self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
    self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
    self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
    self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

    self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
    self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

    self.avgpool = nn.AvgPool2d(8, stride=1)
    self.linear = nn.Linear(1024, 10)
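Here nn.MaxPool2d(3, stride=2, padding=1) halves the spatial resolution using overlapping 3x3 windows rather than the non-overlapping 2x2 pooling seen in the VGG example above. A quick check (an illustration, not part of the project):

import torch
import torch.nn as nn

maxpool = nn.MaxPool2d(3, stride=2, padding=1)
x = torch.randn(1, 480, 32, 32)   # e.g. a CIFAR-sized feature map with the 480 channels produced by b3
print(maxpool(x).shape)           # torch.Size([1, 480, 16, 16])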
Example #10
Source File: albunet.py From neural-pipeline with MIT License | 6 votes |
def __init__(self, block, layers, in_channels=3):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example #11
Source File: base.py From fast-MPN-COV with MIT License | 6 votes |
def _reconstruct_inception(self, basemodel):
    model = nn.Module()
    model.features = nn.Sequential(basemodel.Conv2d_1a_3x3,
                                   basemodel.Conv2d_2a_3x3,
                                   basemodel.Conv2d_2b_3x3,
                                   nn.MaxPool2d(kernel_size=3, stride=2),
                                   basemodel.Conv2d_3b_1x1,
                                   basemodel.Conv2d_4a_3x3,
                                   nn.MaxPool2d(kernel_size=3, stride=2),
                                   basemodel.Mixed_5b,
                                   basemodel.Mixed_5c,
                                   basemodel.Mixed_5d,
                                   basemodel.Mixed_6a,
                                   basemodel.Mixed_6b,
                                   basemodel.Mixed_6c,
                                   basemodel.Mixed_6d,
                                   basemodel.Mixed_6e,
                                   basemodel.Mixed_7a,
                                   basemodel.Mixed_7b,
                                   basemodel.Mixed_7c)
    model.representation = nn.AdaptiveAvgPool2d((1, 1))
    model.classifier = basemodel.fc
    model.representation_dim = basemodel.fc.weight.size(1)
    return model
Example #12
Source File: cnns.py From cvpr2018-hnd with MIT License | 6 votes |
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    self.avgpool = nn.AvgPool2d(7, stride=1)
    self.fc = nn.Linear(512 * block.expansion, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example #13
Source File: model.py From ACAN with MIT License | 6 votes |
def __init__(self, min_depth, max_depth, num_classes,
             classifierType, inferenceType, decoderType,
             height, width, alpha=0, beta=0,
             layers=[3, 4, 6, 3], block=Bottleneck):
    # Note: classifierType: CE=Cross Entropy, OR=Ordinal Regression
    super(ResNet, self).__init__(min_depth, max_depth, num_classes,
                                 classifierType, inferenceType, decoderType)
    self.inplanes = 64
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu1 = nn.ReLU(True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
    self.alpha = alpha
    self.beta = beta
    self.decoder = make_decoder(height, width, decoderType)
    self.use_inter = self.alpha != 0.0
    self.interout, self.classifier = make_classifier(classifierType, num_classes, self.use_inter,
                                                     channel1=1024, channel2=2048)
    self.parameter_initialization()
Example #14
Source File: module.py From End-to-end-ASR-Pytorch with MIT License | 6 votes |
def __init__(self, input_dim):
    super(VGGExtractor, self).__init__()
    self.init_dim = 64
    self.hide_dim = 128
    in_channel, freq_dim, out_dim = self.check_dim(input_dim)
    self.in_channel = in_channel
    self.freq_dim = freq_dim
    self.out_dim = out_dim

    self.extractor = nn.Sequential(
        nn.Conv2d(in_channel, self.init_dim, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(self.init_dim, self.init_dim, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.MaxPool2d(2, stride=2),  # Half-time dimension
        nn.Conv2d(self.init_dim, self.hide_dim, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(self.hide_dim, self.hide_dim, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.MaxPool2d(2, stride=2)  # Half-time dimension
    )
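Each nn.MaxPool2d(2, stride=2) here halves both of the last two axes, so the extractor reduces the time dimension by a factor of 4 overall. A rough sketch, assuming an input laid out as (batch, channel, time, frequency):

import torch
import torch.nn as nn

x = torch.randn(8, 1, 400, 40)     # assumed (batch, channel, time steps, mel bins)
x = nn.MaxPool2d(2, stride=2)(x)   # -> (8, 1, 200, 20)
x = nn.MaxPool2d(2, stride=2)(x)   # -> (8, 1, 100, 10): time reduced 400 -> 100
print(x.shape)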
Example #15
Source File: resnet.py From cascade-rcnn_Pytorch with MIT License | 6 votes |
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)  # change
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    # different
    self.avgpool = nn.AvgPool2d(7)
    self.fc = nn.Linear(512 * block.expansion, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
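The "# change" comment refers to the pooling layer: torchvision's ResNet uses MaxPool2d(kernel_size=3, stride=2, padding=1), whereas this version drops the padding and sets ceil_mode=True, which rounds the output size up instead of down. A small comparison (an illustration, not part of the project):

import torch
import torch.nn as nn

x = torch.randn(1, 64, 112, 112)   # e.g. the feature map after the stride-2 7x7 stem conv on a 224x224 image

floor_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)                 # default ceil_mode=False
ceil_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)  # as in this example

print(floor_pool(x).shape)  # torch.Size([1, 64, 55, 55])
print(ceil_pool(x).shape)   # torch.Size([1, 64, 56, 56]), matching the usual padding=1 output size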
Example #16
Source File: detnet_backbone.py From cascade-rcnn_Pytorch with MIT License | 6 votes |
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(DetNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_new_layer(256, layers[3])
    self.layer5 = self._make_new_layer(256, layers[4])
    self.avgpool = nn.AdaptiveAvgPool2d(1)
    self.fc = nn.Linear(1024, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example #17
Source File: vgg.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _make_layers(self, cfg):
    layers = []
    in_channels = 3
    for x in cfg:
        if x == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                       nn.BatchNorm2d(x),
                       nn.ReLU(inplace=True)]
            in_channels = x
    layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
    return nn.Sequential(*layers)

# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size())
Example #18
Source File: resnet.py From cascade-rcnn_Pytorch with MIT License | 6 votes |
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)  # change
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    # different
    self.avgpool = nn.AvgPool2d(7)
    self.fc = nn.Linear(512 * block.expansion, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example #19
Source File: detnet_backbone.py From cascade-rcnn_Pytorch with MIT License | 6 votes |
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(DetNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_new_layer(256, layers[3])
    self.layer5 = self._make_new_layer(256, layers[4])
    self.avgpool = nn.AdaptiveAvgPool2d(1)
    self.fc = nn.Linear(1024, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example #20
Source File: digit_network.py From transferlearning with MIT License | 6 votes |
def __init__(self):
    super(Network, self).__init__()
    self.feature = nn.Sequential()
    self.feature.add_module('f_conv1', nn.Conv2d(3, 64, kernel_size=5))
    self.feature.add_module('f_bn1', nn.BatchNorm2d(64))
    self.feature.add_module('f_pool1', nn.MaxPool2d(2))
    self.feature.add_module('f_relu1', nn.ReLU(True))
    self.feature.add_module('f_conv2', nn.Conv2d(64, 50, kernel_size=5))
    self.feature.add_module('f_bn2', nn.BatchNorm2d(50))
    self.feature.add_module('f_drop1', nn.Dropout2d())
    self.feature.add_module('f_pool2', nn.MaxPool2d(2))
    self.feature.add_module('f_relu2', nn.ReLU(True))

    self.class_classifier = nn.Sequential()
    self.class_classifier.add_module('c_fc1', nn.Linear(50 * 5 * 5, 100))
    self.class_classifier.add_module('c_bn1', nn.BatchNorm1d(100))
    self.class_classifier.add_module('c_relu1', nn.ReLU(True))
    self.class_classifier.add_module('c_drop1', nn.Dropout2d())
    self.class_classifier.add_module('c_fc2', nn.Linear(100, 500))
    self.class_classifier.add_module('c_bn2', nn.BatchNorm1d(500))
    self.class_classifier.add_module('c_relu2', nn.ReLU(True))
    self.class_classifier.add_module('c_fc3', nn.Linear(500, 10))
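A shape trace makes the 50 * 5 * 5 input size of the first classifier layer concrete, assuming 32x32 RGB digit images (e.g. SVHN-sized inputs; an illustration, not part of the project):

import torch
import torch.nn as nn

x = torch.randn(1, 3, 32, 32)
x = nn.Conv2d(3, 64, kernel_size=5)(x)   # -> (1, 64, 28, 28), no padding: 32 - 5 + 1
x = nn.MaxPool2d(2)(x)                   # -> (1, 64, 14, 14)
x = nn.Conv2d(64, 50, kernel_size=5)(x)  # -> (1, 50, 10, 10)
x = nn.MaxPool2d(2)(x)                   # -> (1, 50, 5, 5), flattened to 50 * 5 * 5 features
print(x.shape)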
Example #21
Source File: ResNet.py From transferlearning with MIT License | 6 votes |
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    self.avgpool = nn.AvgPool2d(8, stride=1)
    self.baselayer = [self.conv1, self.bn1, self.layer1, self.layer2, self.layer3, self.layer4]
    self.fc = nn.Linear(512 * block.expansion, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example #22
Source File: DenseNet2016.py From Pytorch-Networks with MIT License | 6 votes |
def __init__(self, k, block_list, num_init_features=64, bn_size=4,
             drop_rate=0, memory_efficient=False):
    super(DenseNet, self).__init__()
    self.head_conv = nn.Sequential(
        nn.Conv2d(3, num_init_features, 7, 2, 3, bias=False),
        nn.BatchNorm2d(num_init_features),
        nn.ReLU(inplace=True),)
    self.maxpool_1 = nn.MaxPool2d(3, 2, 1)
    self.dense_body, self.final_channels = self._make_layers(num_init_features,
                                                             bn_size, block_list, k, drop_rate,
                                                             memory_efficient)
    self.avgpool_1 = nn.AdaptiveAvgPool2d((1, 1))
    self.fc_1 = nn.Sequential(
        nn.Flatten(),
        nn.Linear(self.final_channels, 1000),
        nn.Softmax(dim=1),)
    self._initialization()
Example #23
Source File: ResNet.py From transferlearning with MIT License | 6 votes |
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    self.avgpool = nn.AvgPool2d(7, stride=1)
    self.baselayer = [self.conv1, self.bn1, self.layer1, self.layer2, self.layer3, self.layer4]
    self.fc = nn.Linear(512 * block.expansion, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example #24
Source File: GNNlikeCNN2015.py From Pytorch-Networks with MIT License | 6 votes |
def make_stages(cfg_dict):
    """Builds CPM stages from a dictionary
    Args:
        cfg_dict: a dictionary
    """
    layers = []
    for i in range(len(cfg_dict) - 1):
        one_ = cfg_dict[i]
        for k, v in one_.items():
            if 'pool' in k:
                layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])]
            else:
                conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
                                   kernel_size=v[2], stride=v[3], padding=v[4])
                layers += [conv2d, nn.ReLU(inplace=True)]
    one_ = list(cfg_dict[-1].keys())
    k = one_[0]
    v = cfg_dict[-1][k]
    conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
                       kernel_size=v[2], stride=v[3], padding=v[4])
    layers += [conv2d]
    return nn.Sequential(*layers)
Example #25
Source File: GNNlikeCNN2015.py From Pytorch-Networks with MIT License | 6 votes |
def make_vgg19_block(block):
    """Builds a vgg19 block from a dictionary
    Args:
        block: a dictionary
    """
    layers = []
    for i in range(len(block)):
        one_ = block[i]
        for k, v in one_.items():
            if 'pool' in k:
                layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])]
            else:
                conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
                                   kernel_size=v[2], stride=v[3], padding=v[4])
                layers += [conv2d, nn.ReLU(inplace=True)]
    return nn.Sequential(*layers)
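Both make_stages and make_vgg19_block consume lists of single-entry dictionaries: a key containing 'pool' yields nn.MaxPool2d(kernel_size, stride, padding), anything else yields Conv2d + ReLU. A hypothetical fragment (illustrative only; the real values live in the project's configuration):

# conv entries: [in_channels, out_channels, kernel_size, stride, padding]
# pool entries: [kernel_size, stride, padding]
block = [
    {'conv1_1': [3, 64, 3, 1, 1]},
    {'conv1_2': [64, 64, 3, 1, 1]},
    {'pool1_stage1': [2, 2, 0]},   # 'pool' in the key -> nn.MaxPool2d(2, 2, 0)
]
vgg_block = make_vgg19_block(block)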
Example #26
Source File: OpenPose2015.py From Pytorch-Networks with MIT License | 6 votes |
def __init__(self, input_dim):
    super(VGG_19, self).__init__()
    self.conv1_1 = conv(input_dim, 64, 3, 1, 1)
    self.conv1_2 = conv(64, 64, 3, 1, 1)
    self.pooling_1 = nn.MaxPool2d(2, 2, 0)
    self.conv2_1 = conv(64, 128, 3, 1, 1)
    self.conv2_2 = conv(128, 128, 3, 1, 1)
    self.pooling_2 = nn.MaxPool2d(2, 2, 0)
    self.conv3_1 = conv(128, 256, 3, 1, 1)
    self.conv3_2 = conv(256, 256, 3, 1, 1)
    self.conv3_3 = conv(256, 256, 3, 1, 1)
    self.conv3_4 = conv(256, 256, 3, 1, 1)
    self.pooling_3 = nn.MaxPool2d(2, 2, 0)
    self.conv4_1 = conv(256, 512, 3, 1, 1)
    self.conv4_2 = conv(512, 512, 3, 1, 1)
    self.conv4_3 = conv(512, 256, 3, 1, 1)
    self.conv4_4 = conv(256, 128, 3, 1, 1)
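The conv helper appears to use 3x3 kernels with stride 1 and padding 1, so it preserves spatial size and only the three nn.MaxPool2d(2, 2, 0) layers change the resolution, giving the backbone an overall stride of 8. A quick check using plain pooling layers (a sketch; the 368x368 input size is only an assumption):

import torch
import torch.nn as nn

x = torch.randn(1, 3, 368, 368)   # assumed multiple-of-8 input size
for _ in range(3):                # the three pooling_* layers above
    x = nn.MaxPool2d(2, 2, 0)(x)
print(x.shape)                    # torch.Size([1, 3, 46, 46]): 368 / 8 = 46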
Example #27
Source File: CNN_GNN2018.py From Pytorch-Networks with MIT License | 6 votes |
def make_stages(cfg_dict):
    """Builds CPM stages from a dictionary
    Args:
        cfg_dict: a dictionary
    """
    layers = []
    for i in range(len(cfg_dict) - 1):
        one_ = cfg_dict[i]
        for k, v in one_.items():
            if 'pool' in k:
                layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])]
            else:
                conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
                                   kernel_size=v[2], stride=v[3], padding=v[4])
                layers += [conv2d, nn.ReLU(inplace=True)]
    one_ = list(cfg_dict[-1].keys())
    k = one_[0]
    v = cfg_dict[-1][k]
    conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
                       kernel_size=v[2], stride=v[3], padding=v[4])
    layers += [conv2d]
    return nn.Sequential(*layers)
Example #28
Source File: SimpleBaseline2017.py From Pytorch-Networks with MIT License | 6 votes |
def __init__(self, block, layers, in_dim, out_dim):
    self.inplanes = 64
    self.deconv_with_bias = False
    super(PoseResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

    # used for deconv layers
    self.deconv_layers = self._make_deconv_layer(
        3,
        [256, 256, 256],
        [4, 4, 4],
    )
Example #29
Source File: ResNetV2.py From Pytorch-Networks with MIT License | 6 votes |
def __init__(self, block, block_list):
    super(ResNet, self).__init__()
    self.head_conv = nn.Sequential(
        nn.Conv2d(3, 64, 7, 2, 3, bias=False),
        nn.BatchNorm2d(64),
        nn.ReLU(inplace=True),)
    self.maxpool_1 = nn.MaxPool2d(3, 2, 1)
    b_ = block.expansion
    self.layer_1 = self._make_layer(block, 64, 64 * b_, block_list[0], 1)
    self.layer_2 = self._make_layer(block, 64 * b_, 128 * b_, block_list[1], 2)
    self.layer_3 = self._make_layer(block, 128 * b_, 256 * b_, block_list[2], 2)
    self.layer_4 = self._make_layer(block, 256 * b_, 512 * b_, block_list[3], 2)
    self.avgpool_1 = nn.AdaptiveAvgPool2d((1, 1))
    self.fc_1 = nn.Sequential(
        nn.Flatten(),
        nn.Linear(512 * b_, 1000),
        nn.Softmax(dim=1),)
    self._initialization()
Example #30
Source File: ResNeXt2016.py From Pytorch-Networks with MIT License | 6 votes |
def __init__(self, block, block_list, cardinality):
    super(ResNet, self).__init__()
    self.head_conv = nn.Sequential(
        nn.Conv2d(3, 64, 7, 2, 3, bias=False),
        nn.BatchNorm2d(64),
        nn.ReLU(inplace=True),)
    self.maxpool_1 = nn.MaxPool2d(3, 2, 1)
    b_ = block.expansion
    self.layer_1 = self._make_layer(block, 64, 128 * b_, block_list[0], 1, cardinality)
    self.layer_2 = self._make_layer(block, 128 * b_, 256 * b_, block_list[1], 2, cardinality)
    self.layer_3 = self._make_layer(block, 256 * b_, 512 * b_, block_list[2], 2, cardinality)
    self.layer_4 = self._make_layer(block, 512 * b_, 1024 * b_, block_list[3], 2, cardinality)
    self.avgpool_1 = nn.AdaptiveAvgPool2d((1, 1))
    self.fc_1 = nn.Sequential(
        nn.Flatten(),
        nn.Linear(1024 * b_, 1000),
        nn.Softmax(dim=1),)
    self._initialization()