Python chainer.functions.clipped_relu() Examples
The following are 10 code examples of chainer.functions.clipped_relu().
You may also want to check out all available functions/classes of the module chainer.functions, or try the search function.
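For orientation: clipped_relu computes the clipped rectifier y = min(max(0, x), z), i.e. a ReLU whose output is capped at z. A minimal sketch, not taken from any project below (the sample values are made up):

import numpy as np
import chainer.functions as F

x = np.array([-1.0, 0.5, 2.0], dtype=np.float32)
y = F.clipped_relu(x, z=1.0)  # elementwise min(max(0, x), 1.0)
print(y.array)  # [0.  0.5 1. ]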
Example #1
Source File: basic_cnn_tail.py From SeRanet with MIT License
def __call__(self, x, t=None):
    self.clear()
    # x = Variable(x_data)  # x_data.astype(np.float32)
    h = F.leaky_relu(self.conv1(x), slope=0.1)
    h = F.leaky_relu(self.conv2(h), slope=0.1)
    h = F.leaky_relu(self.conv3(h), slope=0.1)
    h = F.leaky_relu(self.conv4(h), slope=0.1)
    h = F.leaky_relu(self.conv5(h), slope=0.1)
    h = F.leaky_relu(self.conv6(h), slope=0.1)
    h = F.clipped_relu(self.conv7(h), z=1.0)
    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h
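In this SeRanet model (and the near-identical heads in Examples #4, #6 and #7), the final clipped_relu with z=1.0 pins the reconstructed image to the [0, 1] pixel range before the mean-squared-error loss. A hedged sketch of the equivalence, assuming float32 activations:

import numpy as np
import chainer.functions as F

h = np.linspace(-0.5, 1.5, 5).astype(np.float32)
# clipped_relu(h, z=1.0) behaves like clipping to [0, 1]
assert np.allclose(F.clipped_relu(h, z=1.0).array,
                   F.clip(h, 0.0, 1.0).array)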
Example #2
Source File: test_clipped_relu.py From chainer with MIT License
def forward(self, inputs, device):
    x, = inputs
    y = functions.clipped_relu(x, self.z)
    return y,
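This forward comes from Chainer's own test suite. A hedged sketch of how such a method typically sits inside chainer.testing.FunctionTestCase (the class name, shape, and z value here are assumptions for illustration):

import numpy as np
from chainer import functions, testing

@testing.parameterize(*testing.product({'shape': [(3, 2)], 'z': [2.0]}))
class TestClippedReLU(testing.FunctionTestCase):

    def generate_inputs(self):
        x = np.random.uniform(-3, 3, self.shape).astype(np.float32)
        return x,

    def forward(self, inputs, device):
        x, = inputs
        y = functions.clipped_relu(x, self.z)
        return y,

    def forward_expected(self, inputs):
        x, = inputs
        expected = np.minimum(np.maximum(0, x), self.z)
        return expected.astype(np.float32),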
Example #3
Source File: test_clipped_relu.py From chainer with MIT License
def forward(self):
    x = chainer.Variable(self.x)
    return functions.clipped_relu(x, self.z)
Example #4
Source File: basic_cnn_small.py From SeRanet with MIT License
def __call__(self, x, t=None):
    self.clear()
    h = F.leaky_relu(self.conv1(x), slope=0.1)
    h = F.leaky_relu(self.conv2(h), slope=0.1)
    # h = F.leaky_relu(self.conv3(h), slope=0.1)
    # h = F.leaky_relu(self.conv4(h), slope=0.1)
    h = F.clipped_relu(self.conv3(h), z=1.0)
    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h
Example #5
Source File: seranet_v1.py From SeRanet with MIT License
def __call__(self, x, t=None):
    self.clear()
    h1 = F.leaky_relu(self.conv1(x), slope=0.1)
    h1 = F.leaky_relu(self.conv2(h1), slope=0.1)
    h1 = F.leaky_relu(self.conv3(h1), slope=0.1)

    h2 = self.seranet_v1_crbm(x)

    # Fusion
    h12 = F.concat((h1, h2), axis=1)

    lu = F.leaky_relu(self.convlu6(h12), slope=0.1)
    lu = F.leaky_relu(self.convlu7(lu), slope=0.1)
    lu = F.leaky_relu(self.convlu8(lu), slope=0.1)
    ru = F.leaky_relu(self.convru6(h12), slope=0.1)
    ru = F.leaky_relu(self.convru7(ru), slope=0.1)
    ru = F.leaky_relu(self.convru8(ru), slope=0.1)
    ld = F.leaky_relu(self.convld6(h12), slope=0.1)
    ld = F.leaky_relu(self.convld7(ld), slope=0.1)
    ld = F.leaky_relu(self.convld8(ld), slope=0.1)
    rd = F.leaky_relu(self.convrd6(h12), slope=0.1)
    rd = F.leaky_relu(self.convrd7(rd), slope=0.1)
    rd = F.leaky_relu(self.convrd8(rd), slope=0.1)

    # Splice
    h = CF.splice(lu, ru, ld, rd)

    h = F.leaky_relu(self.conv9(h), slope=0.1)
    h = F.leaky_relu(self.conv10(h), slope=0.1)
    h = F.leaky_relu(self.conv11(h), slope=0.1)
    h = F.clipped_relu(self.conv12(h), z=1.0)

    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h
Example #6
Source File: basic_cnn_head.py From SeRanet with MIT License
def __call__(self, x, t=None):
    self.clear()
    h = F.leaky_relu(self.conv1(x), slope=0.1)
    h = F.leaky_relu(self.conv2(h), slope=0.1)
    h = F.leaky_relu(self.conv3(h), slope=0.1)
    h = F.leaky_relu(self.conv4(h), slope=0.1)
    h = F.leaky_relu(self.conv5(h), slope=0.1)
    h = F.leaky_relu(self.conv6(h), slope=0.1)
    h = F.clipped_relu(self.conv7(h), z=1.0)
    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h
Example #7
Source File: basic_cnn_middle.py From SeRanet with MIT License
def __call__(self, x, t=None):
    self.clear()
    h = F.leaky_relu(self.conv1(x), slope=0.1)
    h = F.leaky_relu(self.conv2(h), slope=0.1)
    h = F.leaky_relu(self.conv3(h), slope=0.1)
    h = F.leaky_relu(self.conv4(h), slope=0.1)
    h = F.leaky_relu(self.conv5(h), slope=0.1)
    h = F.leaky_relu(self.conv6(h), slope=0.1)
    h = F.clipped_relu(self.conv7(h), z=1.0)
    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h
Example #8
Source File: net.py From PredNet with Apache License 2.0
def __call__(self, x):
    # Lazily initialize the prediction units P with zeros.
    for nth in range(self.layers):
        if getattr(self, 'P' + str(nth)) is None:
            setattr(self, 'P' + str(nth), variable.Variable(
                self.xp.zeros(self.sizes[nth], dtype=x.data.dtype),
                volatile='auto'))

    # Bottom-up pass: compute error units E for each layer.
    E = [None] * self.layers
    for nth in range(self.layers):
        if nth == 0:
            E[nth] = F.concat((F.relu(x - getattr(self, 'P' + str(nth))),
                               F.relu(getattr(self, 'P' + str(nth)) - x)))
        else:
            A = F.max_pooling_2d(
                F.relu(getattr(self, 'ConvA' + str(nth))(E[nth - 1])),
                2, stride=2)
            E[nth] = F.concat((F.relu(A - getattr(self, 'P' + str(nth))),
                               F.relu(getattr(self, 'P' + str(nth)) - A)))

    # Top-down pass: update representation units R and predictions P.
    R = [None] * self.layers
    for nth in reversed(range(self.layers)):
        if nth == self.layers - 1:
            R[nth] = getattr(self, 'ConvLSTM' + str(nth))((E[nth],))
        else:
            upR = F.unpooling_2d(R[nth + 1], 2, stride=2, cover_all=False)
            R[nth] = getattr(self, 'ConvLSTM' + str(nth))((E[nth], upR))
        if nth == 0:
            # The lowest-layer prediction is a frame, clipped to [0, 1].
            setattr(self, 'P' + str(nth),
                    F.clipped_relu(
                        getattr(self, 'ConvP' + str(nth))(R[nth]), 1.0))
        else:
            setattr(self, 'P' + str(nth),
                    F.relu(getattr(self, 'ConvP' + str(nth))(R[nth])))
    return self.P0
Example #9
Source File: expanded_conv_2d.py From chainercv with MIT License
def __init__(self, in_channels, out_channels,
             expansion_size=expand_input_by_factor(6),
             expand_pad='SAME', depthwise_stride=1, depthwise_ksize=3,
             depthwise_pad='SAME', project_pad='SAME',
             initialW=None, bn_kwargs={}):
    super(ExpandedConv2D, self).__init__()
    with self.init_scope():
        if callable(expansion_size):
            self.inner_size = expansion_size(num_inputs=in_channels)
        else:
            self.inner_size = expansion_size

        def relu6(x):
            return clipped_relu(x, 6.)

        if self.inner_size > in_channels:
            self.expand = TFConv2DBNActiv(
                in_channels, self.inner_size, ksize=1, pad=expand_pad,
                nobias=True, initialW=initialW, bn_kwargs=bn_kwargs,
                activ=relu6)
            depthwise_in_channels = self.inner_size
        else:
            depthwise_in_channels = in_channels
        self.depthwise = TFConv2DBNActiv(
            depthwise_in_channels, self.inner_size, ksize=depthwise_ksize,
            stride=depthwise_stride, pad=depthwise_pad, nobias=True,
            initialW=initialW, groups=depthwise_in_channels,
            bn_kwargs=bn_kwargs, activ=relu6)
        self.project = TFConv2DBNActiv(
            self.inner_size, out_channels, ksize=1, pad=project_pad,
            nobias=True, initialW=initialW, bn_kwargs=bn_kwargs,
            activ=None)
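The local relu6 helper above is the ReLU6 activation used throughout MobileNet-style blocks; capping activations at 6 keeps them friendly to low-precision inference. A standalone sketch (the sample values are made up):

import numpy as np
from chainer.functions import clipped_relu

def relu6(x):
    return clipped_relu(x, 6.)

x = np.array([-2.0, 3.0, 8.0], dtype=np.float32)
print(relu6(x).array)  # [0. 3. 6.]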
Example #10
Source File: model.py From brain_segmentation with MIT License
def __call__(self, x, train=False):
    """Calculate the output of VoxResNet given input x.

    Parameters
    ----------
    x : (batch_size, in_channels, xlen, ylen, zlen) ndarray
        image to perform semantic segmentation on

    Returns
    -------
    proba : (batch_size, n_classes, xlen, ylen, zlen) ndarray
        probability of each voxel belonging to each class;
        if train=True, returns a list of logits instead
    """
    with chainer.using_config("train", train):
        h = self.conv1a(x)
        h = F.relu(self.bnorm1a(h))
        h = self.conv1b(h)
        c1 = F.clipped_relu(self.c1deconv(h))
        c1 = self.c1conv(c1)

        h = F.relu(self.bnorm1b(h))
        h = self.conv1c(h)
        h = self.voxres2(h)
        h = self.voxres3(h)
        c2 = F.clipped_relu(self.c2deconv(h))
        c2 = self.c2conv(c2)

        h = F.relu(self.bnorm3(h))
        h = self.conv4(h)
        h = self.voxres5(h)
        h = self.voxres6(h)
        c3 = F.clipped_relu(self.c3deconv(h))
        c3 = self.c3conv(c3)

        h = F.relu(self.bnorm6(h))
        h = self.conv7(h)
        h = self.voxres8(h)
        h = self.voxres9(h)
        c4 = F.clipped_relu(self.c4deconv(h))
        c4 = self.c4conv(c4)

        c = c1 + c2 + c3 + c4
        if train:
            return [c1, c2, c3, c4, c]
        else:
            return F.softmax(c)
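Unlike the earlier examples, the deconvolution branches above call clipped_relu without a z argument, so Chainer's default cap of z=20.0 applies. A quick sketch of that default (the sample values are made up):

import numpy as np
import chainer.functions as F

x = np.array([-5.0, 10.0, 30.0], dtype=np.float32)
print(F.clipped_relu(x).array)  # [ 0. 10. 20.]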