Python chainer.optimizers.AdaGrad() Examples

The following are 5 code examples of chainer.optimizers.AdaGrad(), collected from open-source projects. You can go to the original project or source file by following the link above each example, or check out all other available functions and classes of the chainer.optimizers module.
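Before the project code, here is a minimal, self-contained sketch of the typical AdaGrad workflow in Chainer: construct the optimizer, attach it to a model with setup(), then update after a backward pass. The tiny linear model and random data are illustrative only and are not taken from any of the projects below.

import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import optimizers

model = L.Linear(3, 2)                       # toy model for illustration
optimizer = optimizers.AdaGrad(lr=0.01)      # lr defaults to 0.001
optimizer.setup(model)                       # attach the optimizer to the model

x = np.random.rand(1, 3).astype(np.float32)
t = np.zeros((1, 2), dtype=np.float32)
loss = F.mean_squared_error(model(x), t)

model.cleargrads()                           # reset gradients
loss.backward()                              # compute gradients
optimizer.update()                           # one AdaGrad step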
Example #1
Source File: train_utils.py    From chainer-segnet with MIT License
from chainer import optimizers


def get_optimizer(opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # Record the weight decay rate for MomentumSGD; the caller can apply it
    # (e.g. via a chainer.optimizer.WeightDecay hook) after setup() is called
    if opt == 'MomentumSGD':
        optimizer.decay = weight_decay

    return optimizer 
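Note that this helper only constructs the optimizer; attaching it to a model is left to the caller. A hypothetical call site (model is any chainer.Link defined elsewhere):

optimizer = get_optimizer('AdaGrad', lr=0.01)
optimizer.setup(model)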
Example #2
Source File: train.py    From ssai-cnn with MIT License
import re

import chainer
from chainer import optimizers, serializers


def get_model_optimizer(args):
    # get_model() is a helper defined elsewhere in the project
    model = get_model(args)

    if 'opt' in args:
        # prepare optimizer
        if args.opt == 'MomentumSGD':
            optimizer = optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
        elif args.opt == 'Adam':
            optimizer = optimizers.Adam(alpha=args.alpha)
        elif args.opt == 'AdaGrad':
            optimizer = optimizers.AdaGrad(lr=args.lr)
        else:
            raise Exception('No optimizer is selected')

        optimizer.setup(model)

        if args.opt == 'MomentumSGD':
            optimizer.add_hook(
                chainer.optimizer.WeightDecay(args.weight_decay))

        if args.resume_opt is not None:
            serializers.load_hdf5(args.resume_opt, optimizer)
            args.epoch_offset = int(
                re.search('epoch-([0-9]+)', args.resume_opt).groups()[0])

        return model, optimizer
    else:
        print('No optimizer generated.')
        return model 
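The epoch offset above is recovered from the snapshot filename with a regular expression. A quick illustration with a made-up filename:

import re
m = re.search('epoch-([0-9]+)', 'optimizer_epoch-120.h5')
print(m.groups()[0])  # prints '120'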
Example #3
Source File: test_optimizers_by_linear_model.py    From chainer with MIT License
# Method of a test-case class in Chainer's own test suite
def create(self):
    return optimizers.AdaGrad(0.1)
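The single positional argument of AdaGrad is the learning rate, so the call above is equivalent to the keyword form:

optimizer = optimizers.AdaGrad(lr=0.1)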
Example #4
Source File: train.py    From deeppose with GNU General Public License v2.0
import chainer
from chainer import optimizers, serializers


def get_optimizer(model, opt, lr, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None,
                  resume_opt=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # Attach the optimizer to the model
    optimizer.setup(model)

    if opt == 'MomentumSGD':
        optimizer.add_hook(
            chainer.optimizer.WeightDecay(weight_decay))

    if resume_opt is not None:
        serializers.load_npz(resume_opt, optimizer)

    return optimizer 
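The load_npz() call restores optimizer state (including AdaGrad's accumulated squared gradients) from an earlier snapshot. A sketch of the matching save call that would have produced such a file (the filename is made up):

from chainer import serializers

serializers.save_npz('optimizer_epoch-10.npz', optimizer)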
Example #5
Source File: train.py    From ssai-cnn with MIT License
import argparse


def create_args():
    parser = argparse.ArgumentParser()

    # Training settings
    parser.add_argument('--model', type=str,
                        default='models/MnihCNN_multi.py')
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--epoch', type=int, default=400)
    parser.add_argument('--batchsize', type=int, default=128)
    parser.add_argument('--dataset_size', type=float, default=1.0)
    parser.add_argument('--aug_threads', type=int, default=8)
    parser.add_argument('--snapshot', type=int, default=10)
    parser.add_argument('--resume_model', type=str, default=None)
    parser.add_argument('--resume_opt', type=str, default=None)
    parser.add_argument('--epoch_offset', type=int, default=0)

    # Dataset paths
    parser.add_argument('--train_ortho_db', type=str,
                        default='data/mass_merged/lmdb/train_sat')
    parser.add_argument('--train_label_db', type=str,
                        default='data/mass_merged/lmdb/train_map')
    parser.add_argument('--valid_ortho_db', type=str,
                        default='data/mass_merged/lmdb/valid_sat')
    parser.add_argument('--valid_label_db', type=str,
                        default='data/mass_merged/lmdb/valid_map')

    # Dataset info
    parser.add_argument('--ortho_original_side', type=int, default=92)
    parser.add_argument('--label_original_side', type=int, default=24)
    parser.add_argument('--ortho_side', type=int, default=64)
    parser.add_argument('--label_side', type=int, default=16)

    # Options for data augmentation
    parser.add_argument('--fliplr', type=int, default=1)
    parser.add_argument('--rotate', type=int, default=1)
    parser.add_argument('--angle', type=int, default=90)
    parser.add_argument('--norm', type=int, default=1)
    parser.add_argument('--crop', type=int, default=1)

    # Optimization settings
    parser.add_argument('--opt', type=str, default='MomentumSGD',
                        choices=['MomentumSGD', 'Adam', 'AdaGrad'])
    parser.add_argument('--weight_decay', type=float, default=0.0005)
    parser.add_argument('--alpha', type=float, default=0.001)
    parser.add_argument('--lr', type=float, default=0.0005)
    parser.add_argument('--lr_decay_freq', type=int, default=100)
    parser.add_argument('--lr_decay_ratio', type=float, default=0.1)
    parser.add_argument('--seed', type=int, default=1701)

    args = parser.parse_args()

    return args
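Putting the two ssai-cnn snippets together: the namespace returned by create_args() feeds directly into get_model_optimizer() from Example #2, so passing --opt AdaGrad on the command line selects optimizers.AdaGrad. A sketch of the glue code (the training loop itself is omitted):

args = create_args()
model, optimizer = get_model_optimizer(args)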