Python caffe.proto.caffe_pb2.TRAIN Examples

The following are 15 code examples of caffe.proto.caffe_pb2.TRAIN, the Phase enum value that marks a layer or rule as training-only. Each example is taken from the project and source file named above it. You may also want to check out all available functions/classes of the module caffe.proto.caffe_pb2.
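
caffe_pb2.TRAIN is one of the two values of the Phase enum generated from caffe.proto (its counterpart is caffe_pb2.TEST). It is most often assigned to the phase field of a NetStateRule appended to a layer's include list, which restricts that layer to the training network. A minimal, self-contained sketch; the net and layer names here are illustrative, not taken from the examples below:

from caffe.proto import caffe_pb2
from google.protobuf import text_format

# Build a small NetParameter with one data layer active only in the TRAIN phase.
net = caffe_pb2.NetParameter()
net.name = 'example'

layer = net.layer.add()
layer.name = 'data'       # illustrative layer name
layer.type = 'Data'
layer.top.append('data')
layer.top.append('label')
layer.include.add().phase = caffe_pb2.TRAIN  # skipped when the net runs in TEST phase

# Serialize to the human-readable prototxt format.
print(text_format.MessageToString(net))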
Example #1
Source File: builder.py    From channel-pruning with MIT License
def orth_loss_v2(self, bottom_name):
        # self.Python('orth_loss', 'orthLossLayer', loss_weight=1, bottom=[bottom_name], top=[name], name=name)
        # , bottom=[bottom+'_MVN']
        # save bottom
        mainpath = self.bottom

        bottom = bottom_name #'NormLayer', 
        # self.MVN(bottom=[bottom])
        layer = "TransposeLayer"
        layername = bottom_name+'_' + layer
        outputs = [layername]
        self.Python(layer, layer, top=outputs, bottom=[bottom], name=layername, phase='TRAIN')
        self.Matmul()
        
        outputs = [self.this.name]
        self.EuclideanLoss(name=bottom_name+'_euclidean', bottom=outputs, loss_weight=1e-1, phase='TRAIN')
        
        # restore bottom
        self.cur = mainpath 
Example #2
Source File: builder.py    From channel-pruning with MIT License
def resnet(n=3, num_output = 16):
    """6n+2, n=3 9 18 coresponds to 20 56 110 layers"""    
    net_name = "resnet-"    
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name +str(6*n+2))
    name = net_name+str(6*n+2)+'-cifar10'

    if n > 18:
        # warm up
        solver = Solver(solver_name="solver_warm.prototxt", folder=pt_folder, lr_policy=Solver.policy.fixed)
        solver.p.base_lr = 0.01
        solver.set_max_iter(500)
        solver.write()
        del solver
    
    solver = Solver(folder=pt_folder)
    solver.write()
    del solver

    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.resnet_cifar(n, num_output=num_output)
    builder.write(folder=pt_folder) 
Example #3
Source File: builder.py    From channel-pruning with MIT License
def resnet_orth_v2(n=3):
    """6n+2, n=3 9 18 coresponds to 20 56 110 layers"""    
    net_name = "resnet-orth-v2"    
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name +str(6*n+2))
    name = net_name+str(6*n+2)+'-cifar10'

    if n > 18:
        # warm up
        solver = Solver(solver_name="solver_warm.prototxt", folder=pt_folder, lr_policy=Solver.policy.fixed)
        solver.p.base_lr = 0.01
        solver.set_max_iter(500)
        solver.write()
        del solver
    
    solver = Solver(folder=pt_folder)
    solver.write()
    del solver

    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.resnet_cifar(n, orth=True, v2=True)
    builder.write(folder=pt_folder) 
Example #4
Source File: net_generator.py    From resnet-cifar10-caffe with MIT License
def include(self, phase='TRAIN'):
        if phase is not None:
            includes = self.this.include.add()
            if phase == 'TRAIN':
                includes.phase = caffe_pb2.TRAIN
            elif phase == 'TEST':
                includes.phase = caffe_pb2.TEST
        else:
            raise NotImplementedError


    #************************** inplace ************************** 
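
This include() helper (repeated verbatim in Examples #5 and #12 below) translates the string phase argument into the corresponding Phase enum value on a freshly added NetStateRule. The same idea as a standalone sketch, operating on a bare LayerParameter rather than the builder's self.this; add_phase_rule is a hypothetical name:

from caffe.proto import caffe_pb2
from google.protobuf import text_format

def add_phase_rule(layer, phase='TRAIN'):
    # Hypothetical standalone equivalent of the include() helper above.
    rule = layer.include.add()
    rule.phase = caffe_pb2.TRAIN if phase == 'TRAIN' else caffe_pb2.TEST
    return rule

layer = caffe_pb2.LayerParameter(name='conv1', type='Convolution')
add_phase_rule(layer, 'TRAIN')
print(text_format.MessageToString(layer))
# Expected output, roughly:
#   name: "conv1"
#   type: "Convolution"
#   include {
#     phase: TRAIN
#   }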
Example #5
Source File: builder.py    From channel-pruning with MIT License
def include(self, phase='TRAIN'):
        if phase is not None:
            includes = self.this.include.add()
            if phase == 'TRAIN':
                includes.phase = caffe_pb2.TRAIN
            elif phase == 'TEST':
                includes.phase = caffe_pb2.TEST
        else:
            raise NotImplementedError


    #************************** inplace ************************** 
Example #6
Source File: builder.py    From channel-pruning with MIT License
def MVN(self, name=None, bottom=[], normalize_variance=True, across_channels=False, phase='TRAIN'):
        if across_channels:
            raise NotImplementedError
        if not normalize_variance:
            raise NotImplementedError
        self.setup(self.suffix('MVN', name), bottom=bottom, layer_type='MVN')
        if phase != 'TRAIN':
            raise NotImplementedError
        self.include()
Example #7
Source File: builder.py    From channel-pruning with MIT License
def plain_func(self, name, num_output, up=False, **kwargs):
        self.conv_bn_relu(name+'_conv0', num_output=num_output, stride=1+int(up), **kwargs)
        self.conv_bn_relu(name+'_conv1', num_output=num_output, **kwargs)
    
    # def orth_loss(self, bottom_name):
    #     # self.Python('orth_loss', 'orthLossLayer', loss_weight=1, bottom=[bottom_name], top=[name], name=name)
    #     # , bottom=[bottom+'_MVN']

    #     # save bottom
    #     mainpath = self.bottom

    #     bottom = bottom_name #'NormLayer', 
    #     # self.MVN(bottom=[bottom])
    #     layer = "TransposeLayer"
    #     layername = bottom_name+'_' + layer
    #     outputs = [layername]#, bottom_name+'_zerolike']
    #     self.Python(layer, layer, top=outputs, bottom=[bottom], name=layername, phase='TRAIN')
    #     self.Matmul()
    #     # layer="diagLayer"
    #     # layername = bottom_name+'_' + layer
        
    #     # self.Python(layer, layer, top=[layername], name=layername, phase='TRAIN')
    #     outputs = [self.this.name]#, bottom_name+'_zerolike']
    #     self.EuclideanLoss(name=bottom_name+'_euclidean', bottom=outputs, loss_weight=1e-3, phase='TRAIN')
        
    #     # restore bottom
    #     self.cur = mainpath 
Example #8
Source File: builder.py    From channel-pruning with MIT License
def plain(n=3):
    """6n+2, n=3 9 18 coresponds to 20 56 110 layers"""
    net_name = "plain"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name +str(6*n+2))
    name = net_name+str(6*n+2)+'-cifar10'

    solver = Solver(folder=pt_folder)
    solver.write()
    del solver

    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.plain_cifar(n, num_output = 16)
    builder.write(folder=pt_folder) 
Example #9
Source File: builder.py    From channel-pruning with MIT License
def plain_orth(n=3):
    """6n+2, n=3 5 7 9 18 coresponds to 20 56 110 layers"""
    net_name = "plain-orth"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name +str(6*n+2))
    name = net_name+str(6*n+2)+'-cifar10'

    solver = Solver(folder=pt_folder)
    solver.write()
    del solver

    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.plain_cifar(n, orth=True)
    builder.write(folder=pt_folder) 
Example #10
Source File: builder.py    From channel-pruning with MIT License
def plain_orth_v1(n=3):
    """6n+2, n=3 5 7 9 18 coresponds to 20 32 44 56 110 layers"""
    net_name = "plain-orth-v1-"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name +str(6*n+2))
    name = net_name+str(6*n+2)+'-cifar10'

    solver = Solver(folder=pt_folder)
    solver.write()
    del solver

    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.plain_cifar(n, orth=True, inplace=False, num_output = 16)
    builder.write(folder=pt_folder) 
Example #11
Source File: builder.py    From channel-pruning with MIT License
def acc(n=3):
    """6n+2, n=3 9 18 coresponds to 20 56 110 layers"""
    net_name = "plain"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name +str(6*n+2))
    name = net_name+str(6*n+2)+'-cifar10'

    solver = Solver(folder=pt_folder)
    solver.write()
    del solver

    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.plain_cifar(n, num_output = 16, inplace=False)
    builder.write(folder=pt_folder) 
Example #12
Source File: net_generator.py    From ThiNet_Code with MIT License
def include(self, phase='TRAIN'):
        if phase is not None:
            includes = self.this.include.add()
            if phase == 'TRAIN':
                includes.phase = caffe_pb2.TRAIN
            elif phase == 'TEST':
                includes.phase = caffe_pb2.TEST
        else:
            raise NotImplementedError

    #************************** inplace ************************** 
Example #13
Source File: net_generator.py    From ThiNet_Code with MIT License
def solver_and_prototxt(compress_layer, compress_rate, compress_block):
    layers = ['2a', '2b', '2c', '3a', '3b', '3c', '3d',
              '4a', '4b', '4c', '4d', '4e', '4f', '5a', '5b', '5c']
    pt_folder = layers[compress_layer] + '_' + str(compress_block)
    if not os.path.exists(pt_folder):
        os.mkdir(pt_folder)
    name = 'resnet-' + layers[compress_layer] + str(compress_block) +'-ImageNet'

    solver = Solver(folder=pt_folder, b=compress_layer, compress_block=compress_block)
    solver.write()

    builder = Net(name)
    builder.Data('/opt/luojh/Dataset/ImageNet/lmdb/ilsvrc12_train_lmdb', backend='LMDB', phase='TRAIN', mirror=True,
                 crop_size=224, batch_size=32)
    builder.Data('/opt/luojh/Dataset/ImageNet/lmdb/ilsvrc12_val_lmdb', backend='LMDB', phase='TEST', mirror=False,
                 crop_size=224, batch_size=10)
    builder.resnet_50(layers, compress_layer, compress_rate, compress_block)
    builder.write(name='trainval.prototxt', folder=pt_folder)

    if compress_block == 0:
        compress_block = 1
        compress_layer -= 1
    else:
        compress_block = 0

    builder = Net(name + '-old')
    builder.setup('data', 'Data', top=['data'])
    builder.resnet_50(layers, compress_layer, compress_rate, compress_block, deploy=True)
    builder.write(name='deploy.prototxt', folder=pt_folder, deploy=True)
    print "Finished net prototxt generation!" 
Example #14
Source File: gen_model.py    From MobileNetv2-SSDLite with MIT License
def ssd_loss(self):
        layer = self.net.layer.add() 
        layer.name = "mbox_loss"
        layer.type = "MultiBoxLoss"
        layer.bottom.append("mbox_loc")
        layer.bottom.append("mbox_conf")
        layer.bottom.append("mbox_priorbox")
        layer.bottom.append("label")
        layer.top.append("mbox_loss")
        layer.include.add().phase = caffe_pb2.TRAIN
        layer.propagate_down.append(True)
        layer.propagate_down.append(True)
        layer.propagate_down.append(False)
        layer.propagate_down.append(False)
        layer.loss_param.normalization = caffe_pb2.LossParameter.VALID
        layer.multibox_loss_param.loc_loss_type = caffe_pb2.MultiBoxLossParameter.SMOOTH_L1
        layer.multibox_loss_param.conf_loss_type = caffe_pb2.MultiBoxLossParameter.LOGISTIC
        layer.multibox_loss_param.loc_weight = 1.0
        layer.multibox_loss_param.num_classes = self.class_num
        layer.multibox_loss_param.share_location = True
        layer.multibox_loss_param.match_type = caffe_pb2.MultiBoxLossParameter.PER_PREDICTION
        layer.multibox_loss_param.overlap_threshold = 0.5
        layer.multibox_loss_param.use_difficult_gt = True
        layer.multibox_loss_param.neg_pos_ratio = 3.0
        layer.multibox_loss_param.neg_overlap = 0.5
        layer.multibox_loss_param.code_type = caffe_pb2.PriorBoxParameter.CENTER_SIZE
        layer.multibox_loss_param.ignore_cross_boundary_bbox = False
        layer.multibox_loss_param.mining_type = caffe_pb2.MultiBoxLossParameter.MAX_NEGATIVE 
Example #15
Source File: gen_model.py    From MobileNetv2-SSDLite with MIT License
def data_train_ssd(self):
        layer = self.net.layer.add()
        layer.name = "data"
        layer.type = "AnnotatedData"
        layer.top.append("data")
        layer.top.append("label")
        layer.include.add().phase = caffe_pb2.TRAIN

        layer.transform_param.scale = 0.007843
        layer.transform_param.mirror = True
        layer.transform_param.mean_value.append(127.5)
        layer.transform_param.mean_value.append(127.5)
        layer.transform_param.mean_value.append(127.5)
        layer.transform_param.resize_param.prob = 1.0
        layer.transform_param.resize_param.resize_mode = caffe_pb2.ResizeParameter.WARP
        layer.transform_param.resize_param.height = self.input_size
        layer.transform_param.resize_param.width = self.input_size
        layer.transform_param.resize_param.interp_mode.append(caffe_pb2.ResizeParameter.LINEAR)
        layer.transform_param.resize_param.interp_mode.append(caffe_pb2.ResizeParameter.AREA)
        layer.transform_param.resize_param.interp_mode.append(caffe_pb2.ResizeParameter.NEAREST)
        layer.transform_param.resize_param.interp_mode.append(caffe_pb2.ResizeParameter.CUBIC)
        layer.transform_param.resize_param.interp_mode.append(caffe_pb2.ResizeParameter.LANCZOS4)
        layer.transform_param.emit_constraint.emit_type = caffe_pb2.EmitConstraint.CENTER
        layer.transform_param.distort_param.brightness_prob = 0.5
        layer.transform_param.distort_param.brightness_delta = 32.0
        layer.transform_param.distort_param.contrast_lower = 0.5
        layer.transform_param.distort_param.contrast_upper = 1.5
        layer.transform_param.distort_param.hue_prob = 0.5
        layer.transform_param.distort_param.hue_delta = 18.0
        layer.transform_param.distort_param.saturation_prob = 0.5
        layer.transform_param.distort_param.saturation_lower = 0.5
        layer.transform_param.distort_param.saturation_upper = 1.5
        layer.transform_param.distort_param.random_order_prob = 0.0
        layer.transform_param.expand_param.prob = 0.5
        layer.transform_param.expand_param.max_expand_ratio = 4.0


        layer.data_param.source = self.lmdb
        layer.data_param.batch_size = 64
        layer.data_param.backend = caffe_pb2.DataParameter.LMDB

        sampler = layer.annotated_data_param.batch_sampler.add()
        sampler.max_sample = 1
        sampler.max_trials = 1
        for overlap in [0.1, 0.3, 0.5, 0.7, 0.9, 1.0]:
            sampler = layer.annotated_data_param.batch_sampler.add()
            sampler.sampler.min_scale = 0.3
            sampler.sampler.max_scale = 1.0
            sampler.sampler.min_aspect_ratio = 0.5
            sampler.sampler.max_aspect_ratio = 2.0
            sampler.sample_constraint.min_jaccard_overlap = overlap
            sampler.max_sample = 1
            sampler.max_trials = 50
        layer.annotated_data_param.label_map_file = self.label_map
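
Examples #14 and #15 attach caffe_pb2.TRAIN to SSD-specific layers (MultiBoxLoss and AnnotatedData) in exactly the same way, via layer.include.add().phase. Once such a generator has populated its caffe_pb2.NetParameter, the result is typically written out as a text prototxt. A minimal sketch, assuming the populated message is available as net and the output path is illustrative:

from google.protobuf import text_format

def write_prototxt(net, path='train.prototxt'):
    # `net` is assumed to be a caffe_pb2.NetParameter built by a generator like the one above.
    with open(path, 'w') as f:
        f.write(text_format.MessageToString(net))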