Python chainer.functions.local_response_normalization() Examples
The following are 22 code examples of chainer.functions.local_response_normalization(). Each example header names the project and source file it was taken from.
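Before the project examples, here is a minimal, self-contained sketch of the call itself. The array shape and values are arbitrary illustrations, and the keyword arguments spell out what the Chainer documentation lists as the defaults (n=5, k=2, alpha=1e-4, beta=0.75), so the call should be equivalent to plain F.local_response_normalization(x).

import numpy as np
import chainer.functions as F

# A batch of two 16-channel 8x8 feature maps in NCHW layout (illustrative).
x = np.random.randn(2, 16, 8, 8).astype(np.float32)

# Across-channel LRN: each value is divided by
#   (k + alpha * sum of squares over n neighboring channels) ** beta
y = F.local_response_normalization(x, n=5, k=2, alpha=1e-4, beta=0.75)

print(y.shape)  # (2, 16, 8, 8) -- the output shape matches the input

Most of the examples below do exactly this inside a network's __call__ or forward method, usually right after a convolution or pooling step.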
Example #1
Source File: AlexNet.py From deeppose with GNU General Public License v2.0
def __call__(self, x):
    h = F.relu(self.conv1(x))
    h = F.max_pooling_2d(h, 3, stride=2)
    h = F.local_response_normalization(h)
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(h, 3, stride=2)
    h = F.local_response_normalization(h)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.relu(self.conv5(h))
    h = F.max_pooling_2d(h, 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)), train=self.train, ratio=0.6)
    h = F.dropout(F.relu(self.fc7(h)), train=self.train, ratio=0.6)
    return self.fc8(h)
Example #2
Source File: mdl_rgb_d.py From MultimodalDL with MIT License
def __call__(self, x, t):
    self.clear()
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)), train=self.train)
    h = F.dropout(F.relu(self.fc7(h)), train=self.train)
    h = self.fc8(h)

    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    return self.loss
Example #3
Source File: mdl_full.py From MultimodalDL with MIT License
def __call__(self, x, y, t):
    self.clear()
    hR = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.convR1(x))), 3, stride=2)
    hR = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.convR2(hR))), 3, stride=2)
    hR = F.relu(self.convR3(hR))
    hR = F.relu(self.convR4(hR))
    hR = F.max_pooling_2d(F.relu(self.convR5(hR)), 3, stride=2)
    hR = F.dropout(F.relu(self.fcR6(hR)), train=self.train)
    hR = F.dropout(F.relu(self.fcR7(hR)), train=self.train)

    hD = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.convD1(y))), 3, stride=2)
    hD = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.convD2(hD))), 3, stride=2)
    hD = F.relu(self.convD3(hD))
    hD = F.relu(self.convD4(hD))
    hD = F.max_pooling_2d(F.relu(self.convD5(hD)), 3, stride=2)
    hD = F.dropout(F.relu(self.fcD6(hD)), train=self.train)
    hD = F.dropout(F.relu(self.fcD7(hD)), train=self.train)

    h = F.dropout(F.relu(self.fc8(hR, hD)), train=self.train)
    h = self.fc9(h)

    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    return self.loss
Example #4
Source File: caffefunction.py From deel with MIT License
def _setup_lrn(self, layer):
    param = layer.lrn_param
    if param.norm_region != param.ACROSS_CHANNELS:
        raise RuntimeError('Within-channel LRN is not supported')

    # Caffe divides alpha by the window size internally; Chainer does not,
    # so the Caffe alpha is rescaled by local_size to keep the two equivalent.
    fwd = _SingleArgumentFunction(
        functions.local_response_normalization,
        n=param.local_size, k=param.k,
        alpha=param.alpha / param.local_size, beta=param.beta)
    self.forwards[layer.name] = fwd
    self._add_layer(layer)
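The alpha / param.local_size rescaling reflects a convention difference between the two frameworks: Caffe averages the squared activations over the window, while Chainer's documented formula uses the plain sum. Below is a minimal NumPy sketch of the across-channel formula for checking that mapping; lrn_across_channels is a hypothetical helper written for this page, not part of either library.

import numpy as np

def lrn_across_channels(x, n=5, k=2.0, alpha=1e-4, beta=0.75):
    """Reference across-channel LRN: y_c = x_c / (k + alpha * sum sq) ** beta."""
    half = n // 2
    sq = x ** 2
    denom = np.empty_like(x)
    for c in range(x.shape[1]):
        # Sum the squares over up to n channels centered on channel c.
        lo, hi = max(0, c - half), min(x.shape[1], c + half + 1)
        denom[:, c] = (k + alpha * sq[:, lo:hi].sum(axis=1)) ** beta
    return x / denom

x = np.random.randn(1, 8, 4, 4).astype(np.float32)

# Caffe-style parameters (alpha is averaged over the window) ...
local_size, k, alpha, beta = 5, 1.0, 5e-4, 0.75
# ... map onto the sum-based convention by dividing alpha by the window
# size, which is exactly what _setup_lrn passes on.
y = lrn_across_channels(x, n=local_size, k=k, alpha=alpha / local_size, beta=beta)
print(y.shape)  # (1, 8, 4, 4)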
Example #5
Source File: googlenet.py From deel with MIT License
def forward(self, x):
    h = F.relu(self.conv1(x))
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss1_conv(l))
    l = F.relu(self.loss1_fc1(l))
    l = self.loss1_fc2(l)
    loss1 = l

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss2_conv(l))
    l = F.relu(self.loss2_fc1(l))
    l = self.loss2_fc2(l)
    loss2 = l

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.loss3_fc(F.dropout(h, 0.4, train=self.train))
    loss3 = h
    return loss1, loss2, loss3
Example #6
Source File: models.py From chainer-gogh with MIT License
def forward(self, x):
    y1 = self.model['conv1/7x7_s2'](x)
    h = F.relu(y1)
    h = F.local_response_normalization(self.pool_func(h, 3, stride=2), n=5)
    h = F.relu(self.model['conv2/3x3_reduce'](h))
    y2 = self.model['conv2/3x3'](h)
    h = F.relu(y2)
    h = self.pool_func(F.local_response_normalization(h, n=5), 3, stride=2)

    out1 = self.model['inception_3a/1x1'](h)
    out3 = self.model['inception_3a/3x3'](F.relu(self.model['inception_3a/3x3_reduce'](h)))
    out5 = self.model['inception_3a/5x5'](F.relu(self.model['inception_3a/5x5_reduce'](h)))
    pool = self.model['inception_3a/pool_proj'](self.pool_func(h, 3, stride=1, pad=1))
    y3 = F.concat((out1, out3, out5, pool), axis=1)
    h = F.relu(y3)

    out1 = self.model['inception_3b/1x1'](h)
    out3 = self.model['inception_3b/3x3'](F.relu(self.model['inception_3b/3x3_reduce'](h)))
    out5 = self.model['inception_3b/5x5'](F.relu(self.model['inception_3b/5x5_reduce'](h)))
    pool = self.model['inception_3b/pool_proj'](self.pool_func(h, 3, stride=1, pad=1))
    y4 = F.concat((out1, out3, out5, pool), axis=1)
    h = F.relu(y4)

    h = self.pool_func(h, 3, stride=2)

    out1 = self.model['inception_4a/1x1'](h)
    out3 = self.model['inception_4a/3x3'](F.relu(self.model['inception_4a/3x3_reduce'](h)))
    out5 = self.model['inception_4a/5x5'](F.relu(self.model['inception_4a/5x5_reduce'](h)))
    pool = self.model['inception_4a/pool_proj'](self.pool_func(h, 3, stride=1, pad=1))
    y5 = F.concat((out1, out3, out5, pool), axis=1)
    h = F.relu(y5)

    out1 = self.model['inception_4b/1x1'](h)
    out3 = self.model['inception_4b/3x3'](F.relu(self.model['inception_4b/3x3_reduce'](h)))
    out5 = self.model['inception_4b/5x5'](F.relu(self.model['inception_4b/5x5_reduce'](h)))
    pool = self.model['inception_4b/pool_proj'](self.pool_func(h, 3, stride=1, pad=1))
    y6 = F.concat((out1, out3, out5, pool), axis=1)
    h = F.relu(y6)

    return [y1, y2, y3, y4, y5, y6]
Example #7
Source File: modified_googlenet.py From deep_metric_learning with MIT License
def __call__(self, x, subtract_mean=True):
    if subtract_mean:
        x = x - self._image_mean
    # h = super(ModifiedGoogLeNet, self).__call__(
    #     x, layers=['pool5'], train=train)['pool5']
    # h = self.bn_fc(h, test=not train)
    # y = self.fc(h)
    # return y
    h = F.relu(self.conv1(x))
    h = F.max_pooling_2d(h, 3, stride=2)
    h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
    h = F.max_pooling_2d(h, 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)
    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)
    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.bn_fc(h)
    y = self.fc(h)
    if self.normalize_output:
        y = F.normalize(y)
    return y
Example #8
Source File: models.py From hyperface with MIT License
def __call__(self, x_img, t_detection, **others):
    # Alexnet
    h = F.relu(self.conv1(x_img))  # conv1
    h = F.max_pooling_2d(h, 3, stride=2, pad=0)  # max1
    h = F.local_response_normalization(h)  # norm1
    h = F.relu(self.conv2(h))  # conv2
    h = F.max_pooling_2d(h, 3, stride=2, pad=0)  # max2
    h = F.local_response_normalization(h)  # norm2
    h = F.relu(self.conv3(h))  # conv3
    h = F.relu(self.conv4(h))  # conv4
    h = F.relu(self.conv5(h))  # conv5
    h = F.max_pooling_2d(h, 3, stride=2, pad=0)  # pool5
    h = F.dropout(F.relu(self.fc6(h)), train=self.train)  # fc6
    h = F.dropout(F.relu(self.fc7(h)), train=self.train)  # fc7
    h_detection = self.fc8(h)  # fc8

    # Loss
    loss = F.softmax_cross_entropy(h_detection, t_detection)
    chainer.report({'loss': loss}, self)

    # Prediction
    h_detection = F.argmax(h_detection, axis=1)

    # Report results
    predict_data = {'img': x_img, 'detection': h_detection}
    teacher_data = {'img': x_img, 'detection': t_detection}
    chainer.report({'predict': predict_data}, self)
    chainer.report({'teacher': teacher_data}, self)

    # Report layer weights
    chainer.report({'conv1_w': {'weights': self.conv1.W},
                    'conv2_w': {'weights': self.conv2.W},
                    'conv3_w': {'weights': self.conv3.W},
                    'conv4_w': {'weights': self.conv4.W},
                    'conv5_w': {'weights': self.conv5.W}}, self)

    return loss
Example #9
Source File: segnet.py From chainer-segnet with MIT License
def __call__(self, x, depth=1):
    assert 1 <= depth <= self.n_encdec
    h = F.local_response_normalization(x, 5, 1, 0.0005, 0.75)

    # Unchain the inner EncDecs after the given depth
    encdec = getattr(self, 'encdec{}'.format(depth))
    encdec.inside = None

    h = self.encdec1(h, train=self.train)
    h = self.conv_cls(h)
    return h
Example #10
Source File: googlenet.py From convnet-benchmarks with MIT License
def forward(self, x):
    h = F.relu(self.conv1(x))
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    if chainer.config.train:
        out1 = F.average_pooling_2d(h, 5, stride=3)
        out1 = F.relu(self.loss1_conv(out1))
        out1 = F.relu(self.loss1_fc1(out1))
        out1 = self.loss1_fc2(out1)

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    if chainer.config.train:
        out2 = F.average_pooling_2d(h, 5, stride=3)
        out2 = F.relu(self.loss2_conv(out2))
        out2 = F.relu(self.loss2_fc1(out2))
        out2 = self.loss2_fc2(out2)

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.dropout(F.average_pooling_2d(h, 7, stride=1), 0.4)
    out3 = self.loss3_fc(h)
    return out1, out2, out3
Example #11
Source File: segnet_basic.py From chainercv with MIT License
def forward(self, x): """Compute an image-wise score from a batch of images Args: x (chainer.Variable): A variable with 4D image array. Returns: chainer.Variable: An image-wise score. Its channel size is :obj:`self.n_class`. """ h = F.local_response_normalization(x, 5, 1, 1e-4 / 5., 0.75) h, indices1 = F.max_pooling_2d( F.relu(self.conv1_bn(self.conv1(h))), 2, 2, return_indices=True) h, indices2 = F.max_pooling_2d( F.relu(self.conv2_bn(self.conv2(h))), 2, 2, return_indices=True) h, indices3 = F.max_pooling_2d( F.relu(self.conv3_bn(self.conv3(h))), 2, 2, return_indices=True) h, indices4 = F.max_pooling_2d( F.relu(self.conv4_bn(self.conv4(h))), 2, 2, return_indices=True) h = self._upsampling_2d(h, indices4) h = self.conv_decode4_bn(self.conv_decode4(h)) h = self._upsampling_2d(h, indices3) h = self.conv_decode3_bn(self.conv_decode3(h)) h = self._upsampling_2d(h, indices2) h = self.conv_decode2_bn(self.conv_decode2(h)) h = self._upsampling_2d(h, indices1) h = self.conv_decode1_bn(self.conv_decode1(h)) score = self.conv_classifier(h) return score
Example #12
Source File: alex.py From mlimages with MIT License
def predict(self, x):
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)), train=self.train)
    h = F.dropout(F.relu(self.fc7(h)), train=self.train)
    h = F.dropout(F.relu(self.fc8(h)), train=self.train)
    h = self.fc9(h)
    return h
Example #13
Source File: caffe_function.py From chainer with MIT License
def _setup_lrn(self, layer):
    param = layer.lrn_param
    if param.norm_region != param.ACROSS_CHANNELS:
        raise RuntimeError('Within-channel LRN is not supported')

    fwd = _SingleArgumentFunction(
        functions.local_response_normalization,
        n=param.local_size, k=param.k,
        alpha=param.alpha / param.local_size, beta=param.beta)
    self.forwards[layer.name] = fwd
    self._add_layer(layer)
Example #14
Source File: test_local_response_normalization.py From chainer with MIT License
def forward(self, inputs, device):
    x, = inputs
    y = functions.local_response_normalization(x)
    return y,
Example #15
Source File: alexnet.py From imgclsmob with MIT License
def __call__(self, x):
    x = super(AlexConv, self).__call__(x)
    if self.use_lrn:
        x = F.local_response_normalization(x)
    return x
Example #16
Source File: LRN.py From chainer-compiler with MIT License
def forward(self, x):
    return F.local_response_normalization(x)
Example #17
Source File: LRN.py From chainer-compiler with MIT License
def forward(self, x):
    return F.local_response_normalization(x)
Example #18
Source File: googlenet.py From chainer with MIT License
def __call__(self, x, t):
    h = F.relu(self.conv1(x))
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss1_conv(l))
    l = F.relu(self.loss1_fc1(l))
    l = self.loss1_fc2(l)
    loss1 = F.softmax_cross_entropy(l, t)

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss2_conv(l))
    l = F.relu(self.loss2_fc1(l))
    l = self.loss2_fc2(l)
    loss2 = F.softmax_cross_entropy(l, t)

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.loss3_fc(F.dropout(h, 0.4))
    loss3 = F.softmax_cross_entropy(h, t)

    loss = 0.3 * (loss1 + loss2) + loss3
    accuracy = F.accuracy(h, t)
    chainer.report({
        'loss': loss,
        'loss1': loss1,
        'loss2': loss2,
        'loss3': loss3,
        'accuracy': accuracy
    }, self)
    return loss
Example #19
Source File: googlenet.py From chainer with MIT License
def forward(self, x, t):
    h = F.relu(self.conv1(x))
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss1_conv(l))
    l = F.relu(self.loss1_fc1(l))
    l = self.loss1_fc2(l)
    loss1 = F.softmax_cross_entropy(l, t)

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss2_conv(l))
    l = F.relu(self.loss2_fc1(l))
    l = self.loss2_fc2(l)
    loss2 = F.softmax_cross_entropy(l, t)

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.loss3_fc(F.dropout(h, 0.4))
    loss3 = F.softmax_cross_entropy(h, t)

    loss = 0.3 * (loss1 + loss2) + loss3
    accuracy = F.accuracy(h, t)
    chainer.report({
        'loss': loss,
        'loss1': loss1,
        'loss2': loss2,
        'loss3': loss3,
        'accuracy': accuracy
    }, self)
    return loss
Example #20
Source File: GoogleNet.py From chainer-compiler with MIT License
def forward(self, x, t):
    h = F.relu(self.conv1(x))
    # return h
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss1_conv(l))
    l = F.relu(self.loss1_fc1(l))
    l = self.loss1_fc2(l)
    loss1 = F.softmax_cross_entropy(l, t)
    # return loss1

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss2_conv(l))
    l = F.relu(self.loss2_fc1(l))
    l = self.loss2_fc2(l)
    loss2 = F.softmax_cross_entropy(l, t)

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.loss3_fc(F.dropout(h, 0.4))
    loss3 = F.softmax_cross_entropy(h, t)

    loss = 0.3 * (loss1 + loss2) + loss3
    # accuracy = F.accuracy(h, t)
    """
    chainer.report({
        'loss': loss, 'loss1': loss1, 'loss2': loss2,
        'loss3': loss3, 'accuracy': accuracy
    }, self)
    """
    return loss

# from https://github.com/chainer/chainer/blob/master/examples/imagenet/googlenet.py
Example #21
Source File: googlenet.py From deel with MIT License
def __call__(self, x, t):
    h = F.relu(self.conv1(x))
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss1_conv(l))
    l = F.relu(self.loss1_fc1(l))
    l = self.loss1_fc2(l)
    loss1 = F.softmax_cross_entropy(l, t)

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss2_conv(l))
    l = F.relu(self.loss2_fc1(l))
    l = self.loss2_fc2(l)
    loss2 = F.softmax_cross_entropy(l, t)

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.loss3_fc(F.dropout(h, 0.4, train=self.train))
    loss3 = F.softmax_cross_entropy(h, t)

    loss = 0.3 * (loss1 + loss2) + loss3
    accuracy = F.accuracy(h, t)
    chainer.report({
        'loss': loss,
        'loss1': loss1,
        'loss2': loss2,
        'loss3': loss3,
        'accuracy': accuracy
    }, self)
    return loss
Example #22
Source File: GoogleNet_with_loss.py From chainer-compiler with MIT License
def forward(self, x, t):
    h = F.relu(self.conv1(x))
    # return h
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss1_conv(l))
    l = F.relu(self.loss1_fc1(l))
    l = self.loss1_fc2(l)
    loss1 = F.softmax_cross_entropy(l, t)
    # return loss1

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss2_conv(l))
    l = F.relu(self.loss2_fc1(l))
    l = self.loss2_fc2(l)
    loss2 = F.softmax_cross_entropy(l, t)

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.loss3_fc(F.dropout(h, 0.4))
    loss3 = F.softmax_cross_entropy(h, t)

    loss = 0.3 * (loss1 + loss2) + loss3
    # accuracy = F.accuracy(h, t)
    """
    chainer.report({
        'loss': loss, 'loss1': loss1, 'loss2': loss2,
        'loss3': loss3, 'accuracy': accuracy
    }, self)
    """
    return loss

# from https://github.com/chainer/chainer/blob/master/examples/imagenet/googlenet.py