Python utils.get_optimizer() Examples
The following are 3 code examples of utils.get_optimizer(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utils, or try the search function.
Example #1
Source File: models.py From Recommender-Systems-Samples with MIT License | 5 votes
# __init__ of a factorization-machine-style model (TensorFlow 1.x); relies on
# `import tensorflow as tf` and a module-level `dtype` constant defined in the
# source file, and runs inside a subclass of Model.
def __init__(self, input_dim=None, output_dim=1, factor_order=10, init_path=None,
             opt_algo='gd', learning_rate=1e-2, l2_w=0, l2_v=0, random_seed=None):
    Model.__init__(self)
    # Variable shapes and initializers: linear weights w, factor matrix v, bias b.
    init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
                 ('v', [input_dim, factor_order], 'xavier', dtype),
                 ('b', [output_dim], 'zero', dtype)]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = tf.sparse_placeholder(dtype)
        self.y = tf.placeholder(dtype)
        self.vars = utils.init_var_map(init_vars, init_path)

        # Second-order (pairwise) FM term: 0.5 * sum((Xv)^2 - X^2 v^2).
        X_square = tf.SparseTensor(self.X.indices, tf.square(self.X.values),
                                   tf.to_int64(tf.shape(self.X)))
        xv = tf.square(tf.sparse_tensor_dense_matmul(self.X, self.vars['v']))
        p = 0.5 * tf.reshape(
            tf.reduce_sum(xv - tf.sparse_tensor_dense_matmul(X_square, tf.square(self.vars['v'])), 1),
            [-1, output_dim])

        # Linear term plus bias, combined into logits.
        xw = tf.sparse_tensor_dense_matmul(self.X, self.vars['w'])
        logits = tf.reshape(xw + self.vars['b'] + p, [-1])
        self.y_prob = tf.sigmoid(logits)

        # Log loss with L2 regularization on the linear and factor outputs.
        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=self.y)) + \
            l2_w * tf.nn.l2_loss(xw) + \
            l2_v * tf.nn.l2_loss(xv)
        self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
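From this call site, utils.get_optimizer(opt_algo, learning_rate, loss) maps an algorithm name to a TensorFlow 1.x training op. A minimal sketch of such a helper, assuming 'adam' and 'adagrad' as alternative names (only the default 'gd' appears above; the others are illustrative):

def get_optimizer(opt_algo, learning_rate, loss):
    # Dispatch on the algorithm name and return an op that minimizes the loss.
    if opt_algo == 'adam':
        return tf.train.AdamOptimizer(learning_rate).minimize(loss)
    elif opt_algo == 'adagrad':
        return tf.train.AdagradOptimizer(learning_rate).minimize(loss)
    else:  # 'gd': plain gradient descent, the default in the example above
        return tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

Returning the minimize op directly fits the usage above, where self.optimizer is later run in a session rather than used as an optimizer object.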
Example #2
Source File: trainer.py From ChaLearn_liveness_challenge with MIT License | 4 votes
# Trainer __init__ from trainer.py (excerpt); relies on torch, torch.nn as nn,
# numpy as np, os, and visdom.Visdom.
def __init__(self, opt):
    self.opt = opt
    self.device = torch.device("cuda" if opt.ngpu else "cpu")

    # Build the backbone and classifier head, then move them to the device.
    self.model, self.classifier = models.get_model(opt.net_type, opt.classifier_type,
                                                   opt.pretrained, int(opt.nclasses))
    self.model = self.model.to(self.device)
    self.classifier = self.classifier.to(self.device)
    if opt.ngpu > 1:
        self.model = nn.DataParallel(self.model)

    self.loss = models.init_loss(opt.loss_type)
    self.loss = self.loss.to(self.device)

    # Optimizer, LR schedule, and margin-alpha schedule all come from utils.
    self.optimizer = utils.get_optimizer(self.model, self.opt)
    self.lr_scheduler = utils.get_lr_scheduler(self.opt, self.optimizer)
    self.alpha_scheduler = utils.get_margin_alpha_scheduler(self.opt)

    self.train_loader = datasets.generate_loader(opt, 'train')
    self.test_loader = datasets.generate_loader(opt, 'val')

    self.epoch = 0
    self.best_epoch = False
    self.training = False
    self.state = {}

    # Running statistics for training and evaluation.
    self.train_loss = utils.AverageMeter()
    self.test_loss = utils.AverageMeter()
    self.batch_time = utils.AverageMeter()
    self.test_metrics = utils.ROCMeter()
    self.best_test_loss = utils.AverageMeter()
    self.best_test_loss.update(np.array([np.inf]))

    # Visdom logging setup.
    self.visdom_log_file = os.path.join(self.opt.out_path, 'log_files', 'visdom.log')
    self.vis = Visdom(port=opt.visdom_port,
                      log_to_filename=self.visdom_log_file,
                      env=opt.exp_name + '_' + str(opt.fold))
    self.vis_loss_opts = {'xlabel': 'epoch', 'ylabel': 'loss',
                          'title': 'losses',
                          'legend': ['train_loss', 'val_loss']}
    self.vis_tpr_opts = {'xlabel': 'epoch', 'ylabel': 'tpr',
                         'title': 'val_tpr',
                         'legend': ['tpr@fpr10-2', 'tpr@fpr10-3', 'tpr@fpr10-4']}
    self.vis_epochloss_opts = {'xlabel': 'epoch', 'ylabel': 'loss',
                               'title': 'epoch_losses',
                               'legend': ['train_loss', 'val_loss']}
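Unlike Example #1, this utils.get_optimizer(self.model, self.opt) takes the model and an option object and returns a torch.optim optimizer. A minimal sketch under the assumption that opt carries hypothetical optimizer and lr fields (the real field names live in the project's config, not in the excerpt above):

def get_optimizer(model, opt):
    # Choose a torch.optim optimizer by name; `opt.optimizer` and `opt.lr`
    # are assumed field names, not confirmed by the example above.
    if opt.optimizer == 'sgd':
        return torch.optim.SGD(model.parameters(), lr=opt.lr, momentum=0.9)
    return torch.optim.Adam(model.parameters(), lr=opt.lr)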
Example #3
Source File: trainer.py From ChaLearn_liveness_challenge with MIT License | 4 votes
# Trainer __init__ from trainer.py (excerpt); near-identical to Example #2, but
# models.get_model() receives opt.loss_type and the test metric depends on the loss.
def __init__(self, opt):
    self.opt = opt
    self.device = torch.device("cuda" if opt.ngpu else "cpu")

    self.model, self.classifier = models.get_model(opt.net_type, opt.loss_type,
                                                   opt.pretrained, int(opt.nclasses))
    self.model = self.model.to(self.device)
    self.classifier = self.classifier.to(self.device)
    if opt.ngpu > 1:
        self.model = nn.DataParallel(self.model)

    self.loss = models.init_loss(opt.loss_type)
    self.loss = self.loss.to(self.device)

    self.optimizer = utils.get_optimizer(self.model, self.opt)
    self.lr_scheduler = utils.get_lr_scheduler(self.opt, self.optimizer)

    self.train_loader = datasets.generate_loader(opt, 'train')
    self.test_loader = datasets.generate_loader(opt, 'val')

    self.epoch = 0
    self.best_epoch = False
    self.training = False
    self.state = {}

    self.train_loss = utils.AverageMeter()
    self.test_loss = utils.AverageMeter()
    self.batch_time = utils.AverageMeter()
    # Standard losses are tracked with an AverageMeter; other loss types get
    # a ROC meter instead.
    if self.opt.loss_type in ['cce', 'bce', 'mse', 'arc_margin']:
        self.test_metrics = utils.AverageMeter()
    else:
        self.test_metrics = utils.ROCMeter()
    self.best_test_loss = utils.AverageMeter()
    self.best_test_loss.update(np.array([np.inf]))

    # Visdom logging setup.
    self.visdom_log_file = os.path.join(self.opt.out_path, 'log_files', 'visdom.log')
    self.vis = Visdom(port=opt.visdom_port,
                      log_to_filename=self.visdom_log_file,
                      env=opt.exp_name + '_' + str(opt.fold))
    self.vis_loss_opts = {'xlabel': 'epoch', 'ylabel': 'loss',
                          'title': 'losses',
                          'legend': ['train_loss', 'val_loss']}
    self.vis_epochloss_opts = {'xlabel': 'epoch', 'ylabel': 'loss',
                               'title': 'epoch_losses',
                               'legend': ['train_loss', 'val_loss']}
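Both trainer examples also wrap the optimizer returned by utils.get_optimizer() in a scheduler via utils.get_lr_scheduler(self.opt, self.optimizer). A plausible sketch of that companion helper, assuming hypothetical opt.lr_decay_step and opt.lr_decay_gamma fields:

def get_lr_scheduler(opt, optimizer):
    # StepLR decays the learning rate by `gamma` every `step_size` epochs;
    # the option names here are assumptions, not taken from the examples.
    return torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=opt.lr_decay_step, gamma=opt.lr_decay_gamma)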