Python caffe.NetSpec() Examples
The following are 30 code examples of caffe.NetSpec(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module caffe.
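All of the examples follow the same basic pattern: construct a caffe.NetSpec(), attach layers from caffe.layers (conventionally imported as L, with caffe.params as P) as attributes, and call to_proto() to obtain the NetParameter protobuf, which can then be written out as a .prototxt. Below is a minimal sketch of that pattern; the layer choices and parameters are illustrative only and are not taken from any particular example on this page.

from caffe import layers as L, params as P
import caffe

def simple_net(batch_size=32):
    n = caffe.NetSpec()
    # DummyData lets the net be defined without an external data source.
    n.data = L.DummyData(shape=dict(dim=[batch_size, 3, 32, 32]))
    n.conv1 = L.Convolution(n.data, kernel_size=3, num_output=16,
                            weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.conv1, in_place=True)
    n.pool1 = L.Pooling(n.relu1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.fc1 = L.InnerProduct(n.pool1, num_output=10,
                           weight_filler=dict(type='xavier'))
    # to_proto() serializes the spec into a NetParameter message.
    return n.to_proto()

# The returned protobuf is typically written to a .prototxt file:
# with open('simple.prototxt', 'w') as f:
#     f.write(str(simple_net()))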
Example #1
Source File: net.py from pytorch-fcn (MIT License)
def fcn(split, tops):
    n = caffe.NetSpec()
    n.color, n.hha, n.label = L.Python(module='nyud_layers',
            layer='NYUDSegDataLayer', ntop=3,
            param_str=str(dict(nyud_dir='../data/nyud', split=split,
                               tops=tops, seed=1337)))
    n = modality_fcn(n, 'color', 'color')
    n = modality_fcn(n, 'hha', 'hha')
    n.score_fused = L.Eltwise(n.score_frcolor, n.score_frhha,
            operation=P.Eltwise.SUM, coeff=[0.5, 0.5])
    n.upscore = L.Deconvolution(n.score_fused,
        convolution_param=dict(num_output=40, kernel_size=64, stride=32,
                               bias_term=False),
        param=[dict(lr_mult=0)])
    n.score = crop(n.upscore, n.color)
    n.loss = L.SoftmaxWithLoss(n.score, n.label,
            loss_param=dict(normalize=False, ignore_label=255))
    return n.to_proto()
Example #2
Source File: train.py from dilation (MIT License)
def make_frontend_vgg(options, is_training):
    batch_size = options.train_batch if is_training else options.test_batch
    image_path = options.train_image if is_training else options.test_image
    label_path = options.train_label if is_training else options.test_label
    net = caffe.NetSpec()
    net.data, net.label = network.make_image_label_data(
        image_path, label_path, batch_size,
        is_training, options.crop_size, options.mean)
    last = network.build_frontend_vgg(
        net, net.data, options.classes)[0]
    if options.up:
        net.upsample = network.make_upsample(last, options.classes)
        last = net.upsample
    net.loss = network.make_softmax_loss(last, net.label)
    if not is_training:
        net.accuracy = network.make_accuracy(last, net.label)
    return net.to_proto()
Example #3
Source File: train.py from dilation (MIT License)
def make_context(options, is_training):
    batch_size = options.train_batch if is_training else options.test_batch
    image_path = options.train_image if is_training else options.test_image
    label_path = options.train_label if is_training else options.test_label
    net = caffe.NetSpec()
    net.data, net.label = network.make_bin_label_data(
        image_path, label_path, batch_size,
        options.label_shape, options.label_stride)
    last = network.build_context(
        net, net.data, options.classes, options.layers)[0]
    if options.up:
        net.upsample = network.make_upsample(last, options.classes)
        last = net.upsample
    net.loss = network.make_softmax_loss(last, net.label)
    if not is_training:
        net.accuracy = network.make_accuracy(last, net.label)
    return net.to_proto()
Example #4
Source File: train.py from deepcontext (MIT License)
def gen_net(batch_size=512, use_bn=True):
    n = NetSpec()
    n.data = L.DummyData(shape={"dim": [batch_size, 3, 96, 96]})
    n.select1 = L.DummyData(shape={"dim": [2]})
    n.select2 = L.DummyData(shape={"dim": [2]})
    n.label = L.DummyData(shape={"dim": [2]})
    caffenet_stack(n.data, n, use_bn)
    n.first = L.BatchReindex(n.relu6, n.select1)
    n.second = L.BatchReindex(n.relu6, n.select2)
    n.fc6_concat = L.Concat(n.first, n.second)
    fc_relu(n, n.fc6_concat, '7', 4096, batchnorm=use_bn)
    fc_relu(n, n.relu7, '8', 4096)
    n.fc9 = L.InnerProduct(n.relu8, num_output=8,
                           weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.fc9, n.label,
                               loss_param=dict(normalization=P.Loss.NONE))
    prot = n.to_proto()
    prot.debug_info = True
    return prot

# image preprocessing. Note that the input image will be modified.
Example #5
Source File: test_net_spec.py from mix-and-match (MIT License)
def lenet(batch_size):
    n = caffe.NetSpec()
    n.data, n.label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]),
                                         dict(dim=[batch_size, 1, 1, 1])],
                                  transform_param=dict(scale=1./255), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20,
                            weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50,
                            weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.ip1 = L.InnerProduct(n.pool2, num_output=500,
                           weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1, num_output=10,
                           weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
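The lenet() function above returns a NetParameter message rather than a ready-to-run network. A minimal sketch of how such a definition is typically consumed (the file name here is illustrative, not part of the original test file) is to serialize it to a .prototxt and load it with caffe.Net:

import caffe

# Write the generated definition to disk.
with open('lenet_auto.prototxt', 'w') as f:
    f.write(str(lenet(batch_size=64)))

# Load it in TRAIN phase with randomly initialized weights.
net = caffe.Net('lenet_auto.prototxt', caffe.TRAIN)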
Example #6
Source File: net.py from fcn (MIT License)
def fcn(split, tops):
    n = caffe.NetSpec()
    n.color, n.hha, n.label = L.Python(module='nyud_layers',
            layer='NYUDSegDataLayer', ntop=3,
            param_str=str(dict(nyud_dir='../data/nyud', split=split,
                               tops=tops, seed=1337)))
    n = modality_fcn(n, 'color', 'color')
    n = modality_fcn(n, 'hha', 'hha')
    n.score_fused = L.Eltwise(n.score_frcolor, n.score_frhha,
            operation=P.Eltwise.SUM, coeff=[0.5, 0.5])
    n.upscore = L.Deconvolution(n.score_fused,
        convolution_param=dict(num_output=40, kernel_size=64, stride=32,
                               bias_term=False),
        param=[dict(lr_mult=0)])
    n.score = crop(n.upscore, n.color)
    n.loss = L.SoftmaxWithLoss(n.score, n.label,
            loss_param=dict(normalize=False, ignore_label=255))
    return n.to_proto()
Example #7
Source File: test_net_spec.py from Deep-Learning-Based-Structural-Damage-Detection (MIT License)
def lenet(batch_size):
    n = caffe.NetSpec()
    n.data, n.label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]),
                                         dict(dim=[batch_size, 1, 1, 1])],
                                  transform_param=dict(scale=1./255), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20,
                            weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50,
                            weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.ip1 = L.InnerProduct(n.pool2, num_output=500,
                           weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1, num_output=10,
                           weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
Example #8
Source File: test_coord_map.py from Deep-Learning-Based-Structural-Damage-Detection (MIT License)
def test_nd_conv(self):
    """
    ND conv maps the same way in more dimensions.
    """
    n = caffe.NetSpec()
    # define data with 3 spatial dimensions, otherwise the same net
    n.data = L.Input(shape=dict(dim=[2, 3, 100, 100, 100]))
    n.conv = L.Convolution(
        n.data, num_output=10, kernel_size=[3, 3, 3], stride=[1, 1, 1],
        pad=[0, 1, 2])
    n.pool = L.Pooling(
        n.conv, pool=P.Pooling.MAX, kernel_size=2, stride=2, pad=0)
    n.deconv = L.Deconvolution(
        n.pool, num_output=10, kernel_size=4, stride=2, pad=0)
    ax, a, b = coord_map_from_to(n.deconv, n.data)
    self.assertEquals(ax, 1)
    self.assertTrue(len(a) == len(b))
    self.assertTrue(np.all(a == 1))
    self.assertEquals(b[0] - 1, b[1])
    self.assertEquals(b[1] - 1, b[2])
Example #9
Source File: test_coord_map.py from Deep-Learning-Based-Structural-Damage-Detection (MIT License)
def coord_net_spec(ks=3, stride=1, pad=0, pool=2, dstride=2, dpad=0):
    """
    Define net spec for simple conv-pool-deconv pattern common to all
    coordinate mapping tests.
    """
    n = caffe.NetSpec()
    n.data = L.Input(shape=dict(dim=[2, 1, 100, 100]))
    n.aux = L.Input(shape=dict(dim=[2, 1, 20, 20]))
    n.conv = L.Convolution(
        n.data, num_output=10, kernel_size=ks, stride=stride, pad=pad)
    n.pool = L.Pooling(
        n.conv, pool=P.Pooling.MAX, kernel_size=pool, stride=pool, pad=0)
    # for upsampling kernel size is 2x stride
    try:
        deconv_ks = [s * 2 for s in dstride]
    except:
        deconv_ks = dstride * 2
    n.deconv = L.Deconvolution(
        n.pool, num_output=10, kernel_size=deconv_ks, stride=dstride,
        pad=dpad)
    return n
Example #10
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_prelu2(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([10, 4, 64, 64]))
    n.relu1 = L.PReLU(n.input1, channel_shared=True)
    self._test_model(*self._netspec_to_model(n, 'prelu2'))
Example #11
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_threshold2(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.abs1 = L.Threshold(n.input1, threshold=1.5)
    self._test_model(*self._netspec_to_model(n, 'threshold2'))
Example #12
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_concat(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([10, 4, 64, 64]))
    n.input2 = L.Input(shape=make_shape([10, 6, 64, 64]))
    n.input3 = L.Input(shape=make_shape([10, 8, 64, 64]))
    n.concat1 = L.Concat(n.input1, n.input2, n.input3)
    self._test_model(*self._netspec_to_model(n, 'concat'))
Example #13
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_concat2(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([10, 4, 64, 64]))
    n.input2 = L.Input(shape=make_shape([10, 6, 64, 64]))
    n.input3 = L.Input(shape=make_shape([10, 8, 64, 64]))
    n.concat1 = L.Concat(n.input1, n.input2, n.input3, axis=1)
    self._test_model(*self._netspec_to_model(n, 'concat2'))
Example #14
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_scale(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.scale1 = L.Scale(n.input1)
    self._test_model(*self._netspec_to_model(n, 'scale'))
Example #15
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_scale3(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.input2 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.scale1 = L.Scale(n.input1, n.input2, bias_term=True, axis=0)
    self._test_model(*self._netspec_to_model(n, 'scale3'))
Example #16
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_scale2(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.scale1 = L.Scale(n.input1, bias_term=True,
                       bias_filler=make_bias_filler())
    self._test_model(*self._netspec_to_model(n, 'scale2'))
Example #17
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_scale4(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.scale1 = L.Scale(n.input1, bias_term=True,
                       bias_filler=make_bias_filler(), axis=0)
    self._test_model(*self._netspec_to_model(n, 'scale4'))
Example #18
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_batchnorm2(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.batch_norm1 = L.BatchNorm(n.input1)
    self._test_model(*self._netspec_to_model(n, 'batchnorm2'))
Example #19
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_eltwise_sum2(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.input2 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.input3 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.sum1 = L.Eltwise(n.input1, n.input2, n.input3, coeff=[1.1, 2.2, 3.3])
    self._test_model(*self._netspec_to_model(n, 'eltwise_sum2'))

# only testing
Example #20
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_relu2(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([10, 4, 64, 64]))
    n.relu1 = L.ReLU(n.input1, negative_slope=0.1)
    self._test_model(*self._netspec_to_model(n, 'relu2'))
Example #21
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_relu(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([10, 4, 64, 64]))
    n.relu1 = L.ReLU(n.input1)
    self._test_model(*self._netspec_to_model(n, 'relu'))
Example #22
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_flatten2(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.flatten1 = L.Flatten(n.input1, axis=-3, end_axis=-2)
    self._test_model(*self._netspec_to_model(n, 'flatten2'))
Example #23
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_threshold(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.abs1 = L.Threshold(n.input1, threshold=0.0)
    self._test_model(*self._netspec_to_model(n, 'threshold'))
Example #24
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_elu2(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([10, 4, 64, 64]))
    n.elu1 = L.ELU(n.input1, alpha=2.0)
    self._test_model(*self._netspec_to_model(n, 'elu2'))
Example #25
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_deconvolution3(self):
    CAFFE_ENGINE = 1
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([10, 4, 6, 6, 6]))
    n.deconv1 = L.Deconvolution(n.input1,
                                convolution_param=dict(
                                    num_output=10,
                                    kernel_size=5,
                                    pad=1,
                                    stride=2,
                                    group=2,
                                    weight_filler=make_weight_filler(),
                                    bias_filler=make_bias_filler(),
                                    engine=CAFFE_ENGINE))
    self._test_model(*self._netspec_to_model(n, 'deconvolution3'))
Example #26
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_deconvolution2(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([10, 4, 64, 64]))
    n.deconv1 = L.Deconvolution(n.input1,
                                convolution_param=dict(
                                    num_output=10,
                                    kernel_size=5,
                                    bias_term=False,
                                    pad_h=10,
                                    pad_w=5,
                                    weight_filler=make_weight_filler(),
                                    bias_filler=make_bias_filler()))
    self._test_model(*self._netspec_to_model(n, 'deconvolution2'))
Example #27
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_deconvolution(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([10, 4, 64, 64]))
    n.deconv1 = L.Deconvolution(n.input1,
                                convolution_param=dict(
                                    num_output=10,
                                    kernel_size=5,
                                    pad=10,
                                    stride=2,
                                    group=2,
                                    weight_filler=make_weight_filler(),
                                    bias_filler=make_bias_filler()))
    self._test_model(*self._netspec_to_model(n, 'deconvolution'))
Example #28
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_pooling3(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([10, 3, 64, 64]))
    n.pooling1 = L.Pooling(n.input1, kernel_size=10, pad_h=5, pad_w=3,
                           stride_h=2, stride_w=3, pool=P.Pooling.MAX)
    self._test_model(*self._netspec_to_model(n, 'pooling3'))
Example #29
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_pooling2(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([10, 3, 64, 64]))
    n.pooling1 = L.Pooling(n.input1, kernel_h=10, kernel_w=10, pad_h=5,
                           pad_w=3, stride=2, pool=P.Pooling.AVE)
    self._test_model(*self._netspec_to_model(n, 'pooling2'))
Example #30
Source File: caffe_layer_test_cases.py from NNEF-Tools (Apache License 2.0)
def test_group_convolution(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([1, 8, 32, 32]))
    n.conv1 = L.Convolution(n.input1, num_output=16, kernel_size=5, group=4,
                            weight_filler=make_weight_filler(),
                            bias_filler=make_bias_filler())
    self._test_model(*self._netspec_to_model(n, 'group_convolution'))