Python mxnet.nd.relu() Examples
The following are 14 code examples of mxnet.nd.relu(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the mxnet.nd module.
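Before the project examples, here is a minimal standalone sketch of what nd.relu() computes: it replaces every negative entry of an NDArray with zero, elementwise.

from mxnet import nd

# relu(x) = max(x, 0), applied elementwise
x = nd.array([-2.0, -0.5, 0.0, 1.5, 3.0])
y = nd.relu(x)
print(y)  # [0. 0. 0. 1.5 3.]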
Example #1
Source File: hourglass.py From mxnet-centernet with MIT License
def forward(self, img):
    inter = self.pre(img)
    outs = []
    for ind in range(self.num_stacks):
        kp_, conv_ = self.kpts[ind], self.convs[ind]
        kp = kp_(inter)
        conv = conv_(kp)
        # one output dict per stack, with one entry per prediction head
        out = {}
        for head in self.heads:
            layer = self.__getattribute__(head)[ind]
            y = layer(conv)
            out[head] = y
        outs.append(out)
        # fuse the intermediate feature with this stack's output
        # before feeding the next stack
        if ind < self.num_stacks - 1:
            inter = self.inters_[ind](inter) + self.convs_[ind](conv)
            inter = nd.relu(inter)
            inter = self.inters[ind](inter)
    return outs
Example #2
Source File: utils.py From d2l-zh with Apache License 2.0
def resnet18(num_classes):
    """The ResNet-18 model."""
    net = nn.Sequential()
    net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1),
            nn.BatchNorm(), nn.Activation('relu'))

    def resnet_block(num_channels, num_residuals, first_block=False):
        blk = nn.Sequential()
        for i in range(num_residuals):
            if i == 0 and not first_block:
                blk.add(Residual(num_channels, use_1x1conv=True, strides=2))
            else:
                blk.add(Residual(num_channels))
        return blk

    net.add(resnet_block(64, 2, first_block=True),
            resnet_block(128, 2),
            resnet_block(256, 2),
            resnet_block(512, 2))
    net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
    return net
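A quick usage sketch for the factory above. The batch shape, channel count, and class count are illustrative assumptions, and it presumes resnet18 and the Residual block (shown in Example #13) are in scope:

from mxnet import init, nd

net = resnet18(num_classes=10)              # assumes the definitions above are imported
net.initialize(init=init.Xavier())
X = nd.random.normal(shape=(1, 3, 32, 32))  # hypothetical batch: one 3-channel 32x32 image
print(net(X).shape)                         # (1, 10)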
Example #3
Source File: base_layers.py From STGCN with GNU General Public License v3.0
def forward(self, x):
    '''
    Parameters
    ----------
    x: nd.array, shape is (batch_size, c_in, time_step, num_of_vertices)

    Returns
    ----------
    shape is (batch_size, c_out, time_step, num_of_vertices)
    '''
    batch_size, c_in, T, num_of_vertices = x.shape
    x_input = self.align(x)
    x_tmp = x.transpose((0, 2, 3, 1)).reshape((-1, num_of_vertices, c_in))
    x_gconv = self.gconv(x_tmp, self.cheb_polys.data())
    x_gc = x_gconv.reshape((-1, T, num_of_vertices, self.c_out)).transpose((0, 3, 1, 2))
    x_gc = x_gc[:, :self.c_out, :, :]
    return nd.relu(x_gc + x_input)
Example #4
Source File: base_layers.py From STGCN with GNU General Public License v3.0
def forward(self, x):
    '''
    Parameters
    ----------
    x: nd.array, shape is (batch_size, c_in, time_step, num_of_vertices)

    Returns
    ----------
    shape is (batch_size, c_out, time_step - Kt + 1, num_of_vertices)
    '''
    x_input = self.align(x)[:, :, self.Kt - 1:, :]
    x_conv = self.conv(x)
    if self.activation == 'GLU':
        # split the doubled channels into a value half and a gate half
        x_conv1, x_conv2 = nd.split(x_conv, axis=1, num_outputs=2)
        return (x_conv1 + x_input) * nd.sigmoid(x_conv2)
    if self.activation == 'relu':
        return nd.relu(x_conv + x_input)
    return x_conv
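The GLU branch is easier to see in isolation. A standalone sketch with arbitrary illustrative shapes: the convolution output is split along the channel axis into a value half and a gate half, and the gate half is passed through a sigmoid.

from mxnet import nd

x_conv = nd.random.normal(shape=(1, 4, 5, 3))               # (batch, 2 * c_out, time, vertices), c_out = 2
x_conv1, x_conv2 = nd.split(x_conv, axis=1, num_outputs=2)  # value half and gate half
glu = x_conv1 * nd.sigmoid(x_conv2)                         # gated linear unit
print(glu.shape)                                            # (1, 2, 5, 3)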
Example #5
Source File: gin.py From dgl with Apache License 2.0
def forward(self, g, h):
    hidden_rep = [h]
    for i in range(self.num_layers - 1):
        h = self.ginlayers[i](g, h)
        h = self.batch_norms[i](h)
        h = nd.relu(h)
        hidden_rep.append(h)

    score_over_layer = 0
    # perform pooling over all nodes in each graph in every layer
    for i, h in enumerate(hidden_rep):
        pooled_h = self.pool(g, h)
        score_over_layer = score_over_layer + self.drop(self.linears_prediction[i](pooled_h))
    return score_over_layer
Example #6
Source File: base_layers.py From STGCN with GNU General Public License v3.0
def __init__(self, Kt, c_in, c_out, activation='relu', **kwargs):
    super(Temporal_conv_layer, self).__init__(**kwargs)
    self.Kt = Kt
    self.c_out = c_out
    self.activation = activation
    with self.name_scope():
        self.align = Align_layer(c_in, c_out, None)
        if activation == 'GLU':
            # GLU needs 2 * c_out channels: one half for values, one half for gates
            self.conv = nn.Conv2D(2 * c_out, (Kt, 1), activation=None)
        elif activation == 'relu':
            self.conv = nn.Conv2D(c_out, (Kt, 1), activation=None)
        else:
            self.conv = nn.Conv2D(c_out, (Kt, 1), activation=activation)
Example #7
Source File: hourglass.py From mxnet-centernet with MIT License
def forward(self, X):
    conv = self.conv(X)
    bn = self.bn(conv)
    return nd.relu(bn)
Example #8
Source File: hourglass.py From mxnet-centernet with MIT License
def forward(self, X):
    linear = self.linear(X)
    bn = self.bn(linear) if self.with_bn else linear
    return nd.relu(bn)
Example #9
Source File: hourglass.py From mxnet-centernet with MIT License
def forward(self, X):
    conv1 = self.conv1(X)
    bn1 = self.bn1(conv1)
    relu1 = nd.relu(bn1)
    conv2 = self.conv2(relu1)
    bn2 = self.bn2(conv2)
    skip = self.skip(X)
    return nd.relu(bn2 + skip)
Example #10
Source File: gin.py From dgl with Apache License 2.0
def forward(self, h):
    h = self.mlp(h)
    h = self.bn(h)
    h = nd.relu(h)
    return h
Example #11
Source File: graph.py From ST-MetaNet with MIT License
def msg_reduce(self, node):
    state = node.mailbox['state']
    alpha = node.mailbox['alpha']
    alpha = nd.softmax(alpha, axis=1)
    new_state = nd.relu(nd.sum(alpha * state, axis=1))
    return {'new_state': new_state}
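Stripped of DGL's mailbox machinery, this reduction is just a softmax-weighted sum followed by a ReLU. A self-contained sketch with made-up shapes (the trailing attention dimension of 1 is an assumption about the mailbox layout):

from mxnet import nd

# (num_nodes, num_incoming_messages, feature_dim), shapes illustrative
state = nd.random.normal(shape=(4, 3, 8))
alpha = nd.random.normal(shape=(4, 3, 1))

alpha = nd.softmax(alpha, axis=1)                    # normalize attention over messages
new_state = nd.relu(nd.sum(alpha * state, axis=1))   # weighted sum, then ReLU
print(new_state.shape)                               # (4, 8)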
Example #12
Source File: graph.py From ST-MetaNet with MIT License
def msg_reduce(self, node):
    state = node.mailbox['state']
    alpha = node.mailbox['alpha']
    alpha = nd.softmax(alpha, axis=1)
    new_state = nd.relu(nd.sum(alpha * state, axis=1)) * nd.sigmoid(self.weight.data(state.context))
    return {'new_state': new_state}
Example #13
Source File: utils.py From d2l-zh with Apache License 2.0
def forward(self, X):
    Y = nd.relu(self.bn1(self.conv1(X)))
    Y = self.bn2(self.conv2(Y))
    if self.conv3:
        X = self.conv3(X)
    return nd.relu(Y + X)
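The forward pass above implies a block definition roughly like the following. This is a sketch reconstructed from the d2l book's Residual block, not copied from the linked file, so treat the constructor details as assumptions:

from mxnet import nd
from mxnet.gluon import nn

class Residual(nn.Block):
    """Sketch of the residual block implied by forward() above."""
    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
        super(Residual, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        # optional 1x1 convolution to match shapes on the skip connection
        self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides) if use_1x1conv else None
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()

    def forward(self, X):
        Y = nd.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        return nd.relu(Y + X)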
Example #14
Source File: gin.py From dgl with Apache License 2.0
def forward(self, x):
    if self.linear_or_not:
        # degenerate case: a single linear layer
        return self.linear(x)
    else:
        h = x
        for i in range(self.num_layers - 1):
            h = nd.relu(self.batch_norms[i](self.linears[i](h)))
        return self.linears[-1](h)
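The hidden-layer pattern in this last example, Dense -> BatchNorm -> ReLU with a plain linear output layer, can be reproduced outside the class. A minimal sketch with assumed layer widths (the original constructor is not shown here):

from mxnet import nd
from mxnet.gluon import nn

# assumed sizes for illustration: two hidden layers of width 16, 4 outputs
linears = [nn.Dense(16), nn.Dense(16), nn.Dense(4)]
batch_norms = [nn.BatchNorm(), nn.BatchNorm()]
for layer in linears + batch_norms:
    layer.initialize()  # shapes are inferred on the first forward pass

h = nd.random.normal(shape=(8, 32))  # batch of 8, 32 input features
for i in range(len(linears) - 1):
    h = nd.relu(batch_norms[i](linears[i](h)))
out = linears[-1](h)
print(out.shape)  # (8, 4)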