Python chainer.functions.identity() Examples
The following are 22 code examples of chainer.functions.identity(). Each example comes from an open-source project, identified in the header above its code. You may also want to check out all available functions/classes of the module chainer.functions, or try the search function.
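Before the examples, here is a minimal sketch of what chainer.functions.identity does: it returns its input unchanged (wrapped as a chainer.Variable), which is why the projects below use it as a no-op default activation or skip connection. The array values are illustrative only.

import numpy as np
import chainer.functions as F

# F.identity passes its input through untouched and returns a Variable.
x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
y = F.identity(x)
assert (y.array == x).all()

# The recurring pattern in the examples below: F.identity as a no-op
# default that can be swapped for F.relu, F.tanh, etc.
activation = F.identity
h = activation(x)  # leaves x unchanged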
Example #1
Source File: darts.py From imgclsmob with MIT License
def darts_skip_connection(channels, stride):
    """
    DARTS specific skip connection layer.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    """
    assert (channels > 0)
    if stride == 1:
        return F.identity
    else:
        assert (stride == 2)
        return DartsReduceBranch(
            in_channels=channels,
            out_channels=channels,
            stride=stride)
Example #2
Source File: scatter_ggnn_readout.py From chainer-chemistry with MIT License
def __init__(self, out_dim, in_channels=None, nobias=False,
             activation=functions.identity,
             activation_agg=functions.identity,
             concat_n_info=False):
    super(ScatterGGNNReadout, self).__init__()
    self.concat_n_info = concat_n_info
    if self.concat_n_info:
        out_dim -= 1
    with self.init_scope():
        self.i_layer = chainer.links.Linear(
            in_channels, out_dim, nobias=nobias)
        self.j_layer = chainer.links.Linear(
            in_channels, out_dim, nobias=nobias)
    self.out_dim = out_dim
    self.in_channels = in_channels
    self.nobias = nobias
    self.activation = activation
    self.activation_agg = activation_agg
Example #3
Source File: gwm_net.py From chainer-chemistry with MIT License
def __init__(self, out_dim, hidden_channels=16, n_update_layers=4,
             n_atom_types=MAX_ATOMIC_NUM, dropout_ratio=0.5,
             concat_hidden=False, weight_tying=True,
             activation=functions.identity, n_edge_types=4,
             with_gwm=True):
    update_kwargs = {'dropout_ratio': dropout_ratio}
    readout_kwargs = {'activation': activation,
                      'activation_agg': activation}
    super(GIN_GWM, self).__init__(
        update_layer=GINUpdate, readout_layer=GGNNReadout,
        out_dim=out_dim, hidden_channels=hidden_channels,
        n_update_layers=n_update_layers, n_atom_types=n_atom_types,
        concat_hidden=concat_hidden, weight_tying=weight_tying,
        n_edge_types=n_edge_types, with_gwm=with_gwm,
        update_kwargs=update_kwargs, readout_kwargs=readout_kwargs
    )
Example #4
Source File: test_variable.py From chainer with MIT License
def test_grad_raise_double_backprop_2(self):
    x = chainer.Variable(np.array([42], np.float32))
    z = F.identity(x)  # new style
    y = IdentityFunction()(z)  # old style
    with testing.assert_warns(DeprecationWarning):
        y.backward(enable_double_backprop=True)
    with pytest.raises(RuntimeError):
        chainer.grad([x.grad_var], [y.grad_var])
Example #5
Source File: residual_block_2d.py From gconv_experiments with MIT License
def __init__(self, in_channels, out_channels, ksize=3, fiber_map='id',
             conv_link=L.Convolution2D, stride=1, pad=1, wscale=1):
    assert ksize % 2 == 1
    if not pad == (ksize - 1) // 2:
        raise NotImplementedError()
    super(ResBlock2D, self).__init__(
        bn1=L.BatchNormalization(in_channels),
        conv1=conv_link(
            in_channels=in_channels, out_channels=out_channels,
            ksize=ksize, stride=stride, pad=pad, wscale=wscale),
        bn2=L.BatchNormalization(out_channels),
        conv2=conv_link(
            in_channels=out_channels, out_channels=out_channels,
            ksize=ksize, stride=1, pad=pad, wscale=wscale)
    )
    if fiber_map == 'id':
        if not in_channels == out_channels:
            raise ValueError('fiber_map cannot be identity when channel '
                             'dimension is changed.')
        self.fiber_map = F.identity
    elif fiber_map == 'zero_pad':
        raise NotImplementedError()
    elif fiber_map == 'linear':
        fiber_map = conv_link(
            in_channels=in_channels, out_channels=out_channels,
            ksize=1, stride=stride, pad=0, wscale=wscale)
        self.add_link('fiber_map', fiber_map)
    else:
        # Report the offending value (the original passed the built-in
        # `type` here by mistake).
        raise ValueError('Unknown fiber_map: ' + str(fiber_map))
Example #6
Source File: ggnn_readout.py From chainer-chemistry with MIT License
def __init__(self, out_dim, in_channels=None, nobias=False,
             activation=functions.identity,
             activation_agg=functions.identity):
    super(GGNNReadout, self).__init__()
    with self.init_scope():
        self.i_layer = GraphLinear(in_channels, out_dim, nobias=nobias)
        self.j_layer = GraphLinear(in_channels, out_dim, nobias=nobias)
    self.out_dim = out_dim
    self.in_channels = in_channels
    self.nobias = nobias
    self.activation = activation
    self.activation_agg = activation_agg
Example #7
Source File: gnn_film.py From chainer-chemistry with MIT License
def __init__(self, out_dim, hidden_channels=16, n_update_layers=4,
             n_atom_types=MAX_ATOMIC_NUM, concat_hidden=False,
             weight_tying=True, activation=functions.identity,
             n_edge_types=5):
    super(GNNFiLM, self).__init__()
    n_readout_layer = n_update_layers if concat_hidden else 1
    n_message_layer = 1 if weight_tying else n_update_layers
    with self.init_scope():
        # Update
        self.embed = EmbedAtomID(out_size=hidden_channels,
                                 in_size=n_atom_types)
        self.update_layers = chainer.ChainList(*[GNNFiLMUpdate(
            hidden_channels=hidden_channels, n_edge_types=n_edge_types)
            for _ in range(n_message_layer)])
        # Readout
        # self.readout_layers = chainer.ChainList(*[GeneralReadout(
        #     out_dim=out_dim, hidden_channels=hidden_channels,
        #     activation=activation, activation_agg=activation)
        #     for _ in range(n_readout_layer)])
        self.readout_layers = chainer.ChainList(*[GGNNReadout(
            out_dim=out_dim, in_channels=hidden_channels * 2,
            activation=activation, activation_agg=activation)
            for _ in range(n_readout_layer)])
    self.out_dim = out_dim
    self.hidden_channels = hidden_channels
    self.n_update_layers = n_update_layers
    self.n_edge_types = n_edge_types
    self.activation = activation
    self.concat_hidden = concat_hidden
    self.weight_tying = weight_tying
Example #8
Source File: gwm_net.py From chainer-chemistry with MIT License
def __init__(self, out_dim, hidden_channels=16, n_update_layers=4,
             n_atom_types=MAX_ATOMIC_NUM, concat_hidden=False,
             weight_tying=True, activation=functions.identity,
             n_edge_types=4, with_gwm=True):
    readout_kwargs = {'activation': activation,
                      'activation_agg': activation}
    super(GGNN_GWM, self).__init__(
        update_layer=GGNNUpdate, readout_layer=GGNNReadout,
        out_dim=out_dim, hidden_channels=hidden_channels,
        n_update_layers=n_update_layers, n_atom_types=n_atom_types,
        concat_hidden=concat_hidden, weight_tying=weight_tying,
        n_edge_types=n_edge_types, with_gwm=with_gwm,
        readout_kwargs=readout_kwargs)
Example #9
Source File: separable_conv_2d_bn_activ.py From chainercv with MIT License
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
             dilate=1, nobias=False, dw_initialW=None, pw_initialW=None,
             dw_initial_bias=None, pw_initial_bias=None,
             dw_activ=identity, pw_activ=relu, bn_kwargs={}):
    self.dw_activ = identity if dw_activ is None else dw_activ
    self.pw_activ = identity if pw_activ is None else pw_activ
    super(SeparableConv2DBNActiv, self).__init__()
    with self.init_scope():
        self.depthwise = Convolution2D(
            in_channels, in_channels, ksize=ksize, stride=stride,
            pad=pad, dilate=dilate, groups=in_channels,
            nobias=nobias, initialW=dw_initialW)
        self.pointwise = Convolution2D(
            in_channels, out_channels, 1,
            nobias=nobias, initialW=pw_initialW)
        if 'comm' in bn_kwargs:
            self.dw_bn = MultiNodeBatchNormalization(
                out_channels, **bn_kwargs)
            self.pw_bn = MultiNodeBatchNormalization(
                out_channels, **bn_kwargs)
        else:
            self.dw_bn = BatchNormalization(in_channels, **bn_kwargs)
            self.pw_bn = BatchNormalization(out_channels, **bn_kwargs)
Example #10
Source File: xception.py From chainercv with MIT License
def __init__(self, in_channels, depthlist, stride=1, dilate=1,
             skip_type='conv', activ_first=True, bn_kwargs={},
             dw_activ_list=[None, None, None],
             pw_activ_list=[F.relu, F.relu, None]):
    super(XceptionBlock, self).__init__()
    self.skip_type = skip_type
    self.activ_first = activ_first
    self.separable2_activ = pw_activ_list[1]
    with self.init_scope():
        self.separable1 = SeparableConv2DBNActiv(
            in_channels, depthlist[0], 3, 1, dilate, dilate,
            nobias=True, bn_kwargs=bn_kwargs,
            dw_activ=dw_activ_list[0], pw_activ=pw_activ_list[0])
        self.separable2 = SeparableConv2DBNActiv(
            depthlist[0], depthlist[1], 3, 1, dilate, dilate,
            nobias=True, bn_kwargs=bn_kwargs,
            dw_activ=dw_activ_list[1], pw_activ=F.identity)
        self.separable3 = SeparableConv2DBNActiv(
            depthlist[1], depthlist[2], 3, stride, dilate, dilate,
            nobias=True, bn_kwargs=bn_kwargs,
            dw_activ=dw_activ_list[2], pw_activ=pw_activ_list[2])
        if skip_type == 'conv':
            self.conv = Conv2DBNActiv(
                in_channels, depthlist[2], 1,
                activ=F.identity, nobias=True, stride=stride,
                bn_kwargs=bn_kwargs)
Example #11
Source File: modeling.py From models with MIT License
def get_activation(activation_string):
    """Maps a string to a Python function, e.g., "relu" => `F.relu`.

    Args:
        activation_string: String name of the activation function.

    Returns:
        A Python function corresponding to the activation function.
        If `activation_string` is None, empty, or "linear", this will
        return F.identity. If `activation_string` is not a string, it
        will return `activation_string`.

    Raises:
        ValueError: The `activation_string` does not correspond to a
            known activation.
    """
    # We assume that anything that's not a string is already an
    # activation function, so we just return it.
    if not isinstance(activation_string, six.string_types):
        return activation_string
    if not activation_string:
        return F.identity

    act = activation_string.lower()
    if act == "linear":
        return F.identity
    elif act == "relu":
        return F.relu
    elif act == "gelu":
        return gelu
    elif act == "tanh":
        return F.tanh
    else:
        raise ValueError("Unsupported activation: %s" % act)
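A short usage sketch of get_activation, following its docstring (these call sites are hypothetical):

# Strings are mapped to Chainer functions; non-strings pass through.
assert get_activation("relu") is F.relu
assert get_activation("linear") is F.identity
assert get_activation("") is F.identity  # empty string -> F.identity
assert get_activation(F.tanh) is F.tanh  # already a function
# get_activation("swish") would raise ValueError("Unsupported activation: swish")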
Example #12
Source File: test_variable.py From chainer with MIT License
def forward(self, x):
    y0 = F.identity(x)
    y10 = self.f10.apply((y0,))
    y11 = self.f11.apply((y0,))
    y12 = self.f12.apply((y0,))
    y = self.f2.apply((y10[0], y11[0], y12[0]))
    return y
Example #13
Source File: fishnet.py From imgclsmob with MIT License
def __call__(self, x):
    if self.squeeze:
        identity = self.c_squeeze(x)
    elif self.resize_identity:
        identity = self.identity_conv(x)
    else:
        identity = x
    x = self.body(x)
    x = x + identity
    return x
Example #14
Source File: test_variable.py From chainer with MIT License
def test_raise_double_backprop_2(self):
    x = chainer.Variable(np.array([42], np.float32))
    z = F.identity(x)  # new style
    y = IdentityFunction()(z)  # old style
    with testing.assert_warns(DeprecationWarning):
        y.backward(enable_double_backprop=True)
    with pytest.raises(RuntimeError):
        with warnings.catch_warnings():
            # ok to be warned that x.grad_var is old-styled scalar
            warnings.simplefilter('ignore', DeprecationWarning)
            x.grad_var.backward()
Example #15
Source File: test_variable.py From chainer with MIT License
def test_raise_double_backprop_2(self):
    x = chainer.Variable(np.array(42, np.float32))
    z = F.identity(x)  # new style
    y = IdentityFunction()(z)  # old style
    y.backward(enable_double_backprop=True)
    with pytest.raises(RuntimeError):
        x.grad_var.backward()
Example #16
Source File: test_variable.py From chainer with MIT License
def test_int(self):
    x = np.array([1], np.int)
    x = chainer.Variable(x)
    y = F.identity(x)
    y.grad = np.array([0], np.int)
    y.backward()
Example #17
Source File: test_variable.py From chainer with MIT License
def test_raise(self):
    x = np.array([1], np.float32)
    x = chainer.Variable(x)
    y = F.identity(x)
    y.grad = np.array([np.nan], np.float32)
    with pytest.raises(RuntimeError):
        y.backward()
Example #18
Source File: test_variable.py From chainer with MIT License
def test_backward_no_grad_required(self):
    class DummyId(chainer.functions.math.identity.Identity):

        def backward(self, a, b):
            raise Exception('backward should not be called on inputs '
                            'that do not require grads')

    x = chainer.Variable(self.x)
    y1, y2 = DummyId().apply((x, x))
    x.node._requires_grad = False
    y1.backward()
Example #19
Source File: test_function.py From chainer with MIT License
def test_backward(self):
    x = chainer.Variable(numpy.array([1]), name='x')
    y1 = F.identity(x)
    y1.name = 'y1'
    y2 = F.identity(x)
    y2.name = 'y2'
    z = y1 + y2
    z.name = 'z'
    z.grad = numpy.array([1])
    z.backward(retain_grad=True)
    self.assertEqual(y1.grad[0], 1)
    self.assertEqual(y2.grad[0], 1)
    self.assertEqual(x.grad[0], 2)
Example #20
Source File: xception.py From imgclsmob with MIT License
def __call__(self, x):
    if self.resize_identity:
        identity = self.identity_conv(x)
    else:
        identity = F.identity(x)
    x = self.body(x)
    x = x + identity
    return x
Example #21
Source File: hrnet.py From imgclsmob with MIT License
def __init__(self, in_channels_list, out_channels_list, num_modules,
             num_branches, num_subblocks):
    super(HRStage, self).__init__()
    self.branches = num_branches
    self.in_channels_list = out_channels_list
    in_branches = len(in_channels_list)
    out_branches = len(out_channels_list)

    with self.init_scope():
        self.transition = SimpleSequential()
        with self.transition.init_scope():
            for i in range(out_branches):
                if i < in_branches:
                    if out_channels_list[i] != in_channels_list[i]:
                        setattr(self.transition, "block{}".format(i + 1), conv3x3_block(
                            in_channels=in_channels_list[i],
                            out_channels=out_channels_list[i],
                            stride=1))
                    else:
                        setattr(self.transition, "block{}".format(i + 1), F.identity)
                else:
                    conv3x3_seq = SimpleSequential()
                    with conv3x3_seq.init_scope():
                        for j in range(i + 1 - in_branches):
                            in_channels_i = in_channels_list[-1]
                            out_channels_i = out_channels_list[i] if j == i - in_branches else in_channels_i
                            setattr(conv3x3_seq, "subblock{}".format(j + 1), conv3x3_block(
                                in_channels=in_channels_i,
                                out_channels=out_channels_i,
                                stride=2))
                    setattr(self.transition, "block{}".format(i + 1), conv3x3_seq)

        self.layers = SimpleSequential()
        with self.layers.init_scope():
            for i in range(num_modules):
                block = HRBlock(
                    in_channels_list=self.in_channels_list,
                    out_channels_list=out_channels_list,
                    num_branches=num_branches,
                    num_subblocks=num_subblocks)
                setattr(self.layers, "block{}".format(i + 1), block)
                self.in_channels_list = block.in_channels_list
Example #22
Source File: hrnet.py From imgclsmob with MIT License
def __init__(self, in_channels_list, out_channels_list, num_branches,
             num_subblocks):
    super(HRBlock, self).__init__()
    self.in_channels_list = in_channels_list
    self.num_branches = num_branches

    with self.init_scope():
        self.branches = SimpleSequential()
        with self.branches.init_scope():
            for i in range(num_branches):
                layers = SimpleSequential()
                with layers.init_scope():
                    in_channels_i = self.in_channels_list[i]
                    out_channels_i = out_channels_list[i]
                    for j in range(num_subblocks[i]):
                        setattr(layers, "unit{}".format(j + 1), ResUnit(
                            in_channels=in_channels_i,
                            out_channels=out_channels_i,
                            stride=1,
                            bottleneck=False))
                        in_channels_i = out_channels_i
                self.in_channels_list[i] = out_channels_i
                setattr(self.branches, "branch{}".format(i + 1), layers)

        if num_branches > 1:
            self.fuse_layers = SimpleSequential()
            with self.fuse_layers.init_scope():
                for i in range(num_branches):
                    fuse_layer = SimpleSequential()
                    with fuse_layer.init_scope():
                        for j in range(num_branches):
                            if j > i:
                                setattr(fuse_layer, "block{}".format(j + 1), UpSamplingBlock(
                                    in_channels=in_channels_list[j],
                                    out_channels=in_channels_list[i],
                                    scale_factor=2 ** (j - i)))
                            elif j == i:
                                setattr(fuse_layer, "block{}".format(j + 1), F.identity)
                            else:
                                conv3x3_seq = SimpleSequential()
                                with conv3x3_seq.init_scope():
                                    for k in range(i - j):
                                        if k == i - j - 1:
                                            setattr(conv3x3_seq, "subblock{}".format(k + 1), conv3x3_block(
                                                in_channels=in_channels_list[j],
                                                out_channels=in_channels_list[i],
                                                stride=2,
                                                activation=None))
                                        else:
                                            setattr(conv3x3_seq, "subblock{}".format(k + 1), conv3x3_block(
                                                in_channels=in_channels_list[j],
                                                out_channels=in_channels_list[j],
                                                stride=2))
                                setattr(fuse_layer, "block{}".format(j + 1), conv3x3_seq)
                    setattr(self.fuse_layers, "layer{}".format(i + 1), fuse_layer)
            self.activ = F.relu