Python tensorflow.contrib.slim.get_variables() Examples

The following are 11 code examples of tensorflow.contrib.slim.get_variables(). The original project and source file for each example are listed above it. You may also want to check out all available functions and classes of the module tensorflow.contrib.slim, or try the search function.
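For orientation before the examples: slim.get_variables() returns the variables stored in a graph collection (GLOBAL_VARIABLES by default), optionally filtered by scope name and name suffix. The short sketch below is not taken from any of the projects listed here; it is a minimal illustration against the TF 1.x API, and the scope name 'my_fc' is an arbitrary choice for the example.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Minimal TF 1.x sketch: create one slim layer, then look up its variables.
inputs = tf.placeholder(tf.float32, shape=[None, 4])
net = slim.fully_connected(inputs, 8, scope='my_fc')  # creates my_fc/weights and my_fc/biases

all_vars = slim.get_variables()                        # all variables in GLOBAL_VARIABLES
fc_vars = slim.get_variables(scope='my_fc')            # only variables under the 'my_fc' scope
fc_weights = slim.get_variables(scope='my_fc', suffix='weights')  # filter by name suffix

print([v.name for v in fc_vars])  # ['my_fc/weights:0', 'my_fc/biases:0']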
Example #1
Source File: base_model.py    From defensegan with Apache License 2.0
def add_save_vars(self, prefixes):
        """Prepares the list of variables that should be saved based on
        their name prefix.

        Args:
            prefixes: Variable name prefixes to find and save.
        """

        for pre in prefixes:
            pre_vars = slim.get_variables(pre)
            self.save_vars.update(pre_vars)

        var_list = ''
        for var in self.save_vars:
            var_list = var_list + var.name + ' '

        print('Saving these variables: {}'.format(var_list))
Example #2
Source File: Engine.py    From PReMVOS with MIT License
def try_load_weights(self):
    fn = None
    if self.load != "":
      fn = self.load.replace(".index", "")
    else:
      files = sorted(glob.glob(self.model_dir + self.model + "-*.index"))
      if len(files) > 0:
        fn = files[-1].replace(".index", "")

    if fn is not None:
      print("loading model from", fn)

      # vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='ReID_net')
      varlist = slim.get_variables()
      # print (np.unique(np.array([var.name.split('/')[0] for var in varlist])))
      good_list = ['conv0','conv1','fc1','fc2','outputTriplet', 'res0', 'res1', 'res10', 'res11','res12', 'res13',
                   'res14', 'res15', 'res16', 'res2', 'res3', 'res4', 'res5','res6', 'res7', 'res8', 'res9']
      varlist = [var for var in varlist if var.name.split('/')[0] in good_list]
      self.saver = tf.train.Saver(pad_step_number=True, var_list=varlist)

      self.saver.restore(self.session, fn)
      if self.model == fn.split("/")[-2]:
        self.start_epoch = int(fn.split("-")[-1])
        print("starting from epoch", self.start_epoch + 1)
    else:
      if self.load_init_savers is None:
        self.load_init_savers = [self._create_load_init_saver(x) for x in self.load_init]
      assert len(self.load_init) == len(self.load_init_savers)
      for fn, load_init_saver in zip(self.load_init, self.load_init_savers):
        if fn.endswith(".pickle"):
          print("trying to initialize model from wider-or-deeper mxnet model", fn)
          load_wider_or_deeper_mxnet_model(fn, self.session)
        else:
          print("initializing model from", fn)
          assert load_init_saver is not None
          load_init_saver.restore(self.session, fn) 
Example #3
Source File: base_model.py    From defensegan with Apache License 2.0
def _initialize_saver(self, prefixes=None, force=False, max_to_keep=5):
        """Initializes the saver object.

        Args:
            prefixes: The prefixes that the saver should take care of.
            force (optional): Rebuild the saver object even if one is
                already set.
            max_to_keep (optional): Maximum number of recent checkpoints
                to keep.
        """
        if self.saver is not None and not force:
            return
        else:
            if prefixes is None or not isinstance(prefixes, (list, tuple)):
                raise ValueError(
                    'Prefixes of the variables that need saving are not defined')

            prefixes_str = ''
            for pref in prefixes:
                prefixes_str = prefixes_str + pref + ' '

            print('[#] Initializing it with variable prefixes: {}'.format(
                prefixes_str))
            saved_vars = []
            for pref in prefixes:
                saved_vars.extend(slim.get_variables(pref))

            self.saver = tf.train.Saver(saved_vars, max_to_keep=max_to_keep) 
Example #4
Source File: gan.py    From defensegan with Apache License 2.0
def _build(self):
        """Builds the computation graph."""

        assert (self.batch_size % self.rec_rr) == 0, 'Batch size ' \
                                                     'should be ' \
                                                     'divisible by ' \
                                                     'random restart'
        self.test_batch_size = self.batch_size

        # Defining batch_size in the input placeholders is unavoidable for
        # now, because the z vectors are TensorFlow variables.
        self.real_data_pl = tf.placeholder(
            tf.float32, shape=[self.batch_size] + self.image_dim,
        )
        self.real_data_test_pl = tf.placeholder(
            tf.float32, shape=[self.test_batch_size] + self.image_dim,
        )

        self.input_pl_transform()
        self._build_generator_discriminator()

        self.fake_data = self.generator_fn()

        self.disc_real = self.discriminator_fn(self.real_data)

        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
            sc = tf.get_variable_scope()
            sc.reuse_variables()
            self.disc_fake = self.discriminator_fn(self.fake_data)

            self.generator_vars = slim.get_variables('Generator')
            self.discriminator_vars = slim.get_variables('Discriminator')

            self.fixed_noise = tf.constant(
                np.random.normal(size=(128, self.latent_dim)).astype(
                    'float32'))
            self.fixed_noise_samples = self.generator_fn(self.fixed_noise,
                                                         is_training=False) 
Example #5
Source File: bbox_estimator.py    From YOLO-3D-Box with MIT License
def train(image_dir, label_dir):

    # load data & gen data
    all_objs = parse_annotation(label_dir, image_dir)
    all_exams  = len(all_objs)
    np.random.shuffle(all_objs)
    train_gen = data_gen(image_dir, all_objs, BATCH_SIZE)
    train_num = int(np.ceil(all_exams/BATCH_SIZE))
    
    ### build graph
    dimension, orientation, confidence, loss, optimizer, loss_d, loss_o, loss_c = build_model()

    ### GPU config
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)

    # create a folder for saving model
    if not os.path.isdir(save_path):
        os.mkdir(save_path)
    variables_to_restore = slim.get_variables()[:26] ## vgg16-conv5

    saver = tf.train.Saver(max_to_keep=100)

    #Load pretrain VGG model
    ckpt_list = tf.contrib.framework.list_variables('./vgg_16.ckpt')[1:-7]
    new_ckpt_list = []
    for name in range(1,len(ckpt_list),2):
        tf.contrib.framework.init_from_checkpoint('./vgg_16.ckpt', {ckpt_list[name-1][0]: variables_to_restore[name]})
        tf.contrib.framework.init_from_checkpoint('./vgg_16.ckpt', {ckpt_list[name][0]: variables_to_restore[name-1]})

    # Initializing the variables
    init = tf.global_variables_initializer()
    sess.run(init)


    # Start to train model
    for epoch in range(epochs):
        epoch_loss = np.zeros((train_num,1),dtype = float)
        tStart_epoch = time.time()
        batch_loss = 0.0
        for num_iters in tqdm(range(train_num),ascii=True,desc='Epoch '+str(epoch+1)+' : Loss:'+str(batch_loss)):
            train_img, train_label = next(train_gen)
            _,batch_loss = sess.run([optimizer,loss], feed_dict={inputs: train_img, d_label: train_label[0], o_label: train_label[1], c_label: train_label[2]})

            epoch_loss[num_iters] = batch_loss 

        # save model
        if (epoch+1) % 5 == 0:
            saver.save(sess,save_path+"model", global_step = epoch+1)

        # Print some information
        print("Epoch:", epoch+1, " done. Loss:", np.mean(epoch_loss))
        tStop_epoch = time.time()
        print("Epoch Time Cost:", round(tStop_epoch - tStart_epoch,2), "s")
        sys.stdout.flush() 
Example #6
Source File: main.py    From 3D-Deepbox with MIT License
def train(image_dir, box2d_loc, label_dir):

    # load data & gen data
    all_objs = parse_annotation(label_dir, image_dir)
    all_exams  = len(all_objs)
    np.random.shuffle(all_objs)
    train_gen = data_gen(image_dir, all_objs, BATCH_SIZE)
    train_num = int(np.ceil(all_exams/BATCH_SIZE))
    
    ### build graph
    dimension, orientation, confidence, loss, optimizer, loss_d, loss_o, loss_c = build_model()

    ### GPU config
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)

    # create a folder for saving model
    if not os.path.isdir(save_path):
        os.mkdir(save_path)
    variables_to_restore = slim.get_variables()[:26] ## vgg16-conv5

    saver = tf.train.Saver(max_to_keep=100)

    #Load pretrain VGG model
    ckpt_list = tf.contrib.framework.list_variables('./vgg_16.ckpt')[1:-7]
    new_ckpt_list = []
    for name in range(1,len(ckpt_list),2):
        tf.contrib.framework.init_from_checkpoint('./vgg_16.ckpt', {ckpt_list[name-1][0]: variables_to_restore[name]})
        tf.contrib.framework.init_from_checkpoint('./vgg_16.ckpt', {ckpt_list[name][0]: variables_to_restore[name-1]})

    # Initializing the variables
    init = tf.global_variables_initializer()
    sess.run(init)


    # Start to train model
    for epoch in range(epochs):
        epoch_loss = np.zeros((train_num,1),dtype = float)
        tStart_epoch = time.time()
        batch_loss = 0.0
        for num_iters in tqdm(range(train_num),ascii=True,desc='Epoch '+str(epoch+1)+' : Loss:'+str(batch_loss)):
            train_img, train_label = next(train_gen)
            _,batch_loss = sess.run([optimizer,loss], feed_dict={inputs: train_img, d_label: train_label[0], o_label: train_label[1], c_label: train_label[2]})

            epoch_loss[num_iters] = batch_loss 

        # save model
        if (epoch+1) % 5 == 0:
            saver.save(sess,save_path+"model", global_step = epoch+1)

        # Print some information
        print("Epoch:", epoch+1, " done. Loss:", np.mean(epoch_loss))
        tStop_epoch = time.time()
        print("Epoch Time Cost:", round(tStop_epoch - tStart_epoch,2), "s")
        sys.stdout.flush() 
Example #7
Source File: train_bier.py    From bier with GNU General Public License v3.0
def build_pairwise_tower_loss(fvecs_i, fvecs_j, scope=None,
                              lambda_weight=LAMBDA_WEIGHT):
    """
    Builds an adversarial regressor from fvecs_j to fvecs_i.

    Args:
        fvecs_i: the target embedding (i.e. the smaller embedding)
        fvecs_j: the source embedding (i.e. the bigger embedding)
        scope: scope name of the regressor.
        lambda_weight: the regularization parameter for the weights.
    Returns:
        The combined adversarial regressor loss (a scalar tensor).
    """
    # build a regressor from fvecs_j to fvecs_i
    fvecs_i = flip_gradient.flip_gradient(fvecs_i)
    fvecs_j = flip_gradient.flip_gradient(fvecs_j)
    net = fvecs_j

    bias_loss = 0.0
    weight_loss = 0.0
    adversarial_loss = 0.0
    with tf.variable_scope(scope):
        for i in xrange(NUM_HIDDENS_ADVERSARIAL):
            if i < NUM_HIDDENS_ADVERSARIAL - 1:
                net = slim.fully_connected(
                    net, HIDDEN_ADVERSARIAL_SIZE, scope='fc_{}'.format(i),
                    activation_fn=tf.nn.relu)
            else:
                net = slim.fully_connected(net, fvecs_i.get_shape().as_list(
                )[-1], scope='fc_{}'.format(i), activation_fn=None)
            b = slim.get_variables(
                scope=tf.get_variable_scope().name + '/fc_{}/biases'.format(i)
            )[0]
            W = slim.get_variables(
                scope=tf.get_variable_scope().name + '/fc_{}/weights'.format(i)
            )[0]
            weight_loss += tf.reduce_mean(
                tf.square(tf.reduce_sum(W * W, axis=1) - 1)) * lambda_weight
            if b is not None:
                bias_loss += tf.maximum(
                    0.0,
                    tf.reduce_sum(b * b) - 1.0) * lambda_weight
        adversarial_loss += -tf.reduce_mean(tf.square(fvecs_i * net))
        tf.summary.scalar('adversarial loss', adversarial_loss)
        tf.summary.scalar('weight loss', weight_loss)
        tf.summary.scalar('bias loss', bias_loss)
    return adversarial_loss + weight_loss + bias_loss 
Example #8
Source File: encoder_decoder_cgan_softmax.py    From taskonomy with MIT License
def build_train_op( self, global_step ):
        '''
            Builds two train ops, one for the Generator and one for the Discriminator. These can 
            be run independently any number of times, and each time will increase the global_step.

            Args:
                global_step: A Tensor to be incremented
            Returns:
                [ g_train_op, d_train_op ]
        '''
        if not self.model_built or not self.losses_built :
            raise RuntimeError( "Cannot build optimizers until 'build_model' ({0}) and 'get_losses' {1} are run".format(
                    self.model_built, self.losses_built ) )
        self.global_step = global_step
        self.global_step_copy = tf.identity( global_step, name='global_step_copy' )

        t_vars = tf.trainable_variables()
    
        # Create the optimizer train_op for the generator
        self.g_optimizer = optimize.build_optimizer( global_step=self.global_step, cfg=self.cfg )
        self.g_vars = slim.get_variables( scope='encoder', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.g_vars += slim.get_variables( scope='decoder', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.g_train_op = optimize.create_train_op( self.loss_g_total, self.g_optimizer, 
                    variables_to_train=self.g_vars, update_global_step=True )
        self.g_lnorm_op = optimize.create_train_op( self.softmax_loss, self.g_optimizer, 
                    variables_to_train=self.g_vars, update_global_step=True )


        # Create a train_op for the discriminator
        if 'discriminator_learning_args' in self.cfg: # use these
            discriminator_learning_args = self.cfg[ 'discriminator_learning_args' ]
        else:
            discriminator_learning_args = self.cfg 
        self.d_optimizer = optimize.build_optimizer( global_step=self.global_step, cfg=discriminator_learning_args )
        self.d_vars = slim.get_variables( scope='discriminator', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.d_vars += slim.get_variables( scope='discriminator_1', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.d_train_op = slim.learning.create_train_op( self.loss_d_total, self.d_optimizer, 
                    variables_to_train=self.d_vars )

        self.train_op = [ self.g_train_op, self.d_train_op, self.g_lnorm_op, self.accuracy]
        self.train_op_built = True
        return self.train_op 
Example #9
Source File: encoder_decoder_cgan.py    From taskonomy with MIT License
def build_train_op( self, global_step ):
        '''
            Builds two train ops, one for the Generator and one for the Discriminator. These can 
            be run independently any number of times, and each time will increase the global_step.

            Args:
                global_step: A Tensor to be incremented
            Returns:
                [ g_train_op, d_train_op ]
        '''
        if not self.model_built or not self.losses_built :
            raise RuntimeError( "Cannot build optimizers until 'build_model' ({0}) and 'get_losses' {1} are run".format(
                    self.model_built, self.losses_built ) )
        self.global_step = global_step
        self.global_step_copy = tf.identity( global_step, name='global_step_copy' )

        t_vars = tf.trainable_variables()
    
        # Create the optimizer train_op for the generator
        self.g_optimizer = optimize.build_optimizer( global_step=self.global_step, cfg=self.cfg )
        self.g_vars = slim.get_variables( scope='encoder', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.g_vars += slim.get_variables( scope='decoder', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.g_train_op = optimize.create_train_op( self.loss_g_total, self.g_optimizer, 
                    variables_to_train=self.g_vars, update_global_step=True )

        self.g_lnorm_op = optimize.create_train_op( self.l1_loss, self.g_optimizer, 
                    variables_to_train=self.g_vars, update_global_step=True )

        # Create a train_op for the discriminator
        if 'discriminator_learning_args' in self.cfg: # use these
            discriminator_learning_args = self.cfg[ 'discriminator_learning_args' ]
        else:
            discriminator_learning_args = self.cfg 
        self.d_optimizer = optimize.build_optimizer( global_step=self.global_step, cfg=discriminator_learning_args )
        self.d_vars = slim.get_variables( scope='discriminator', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.d_vars += slim.get_variables( scope='discriminator_1', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.d_train_op = slim.learning.create_train_op( self.loss_d_total, self.d_optimizer, 
                    variables_to_train=self.d_vars )

        self.train_op = [ self.g_train_op, self.d_train_op, self.g_lnorm_op ]
        self.train_op_built = True
        return self.train_op 
Example #10
Source File: encoder_decoder_cgan_softmax.py    From taskonomy with MIT License
def build_train_op( self, global_step ):
        '''
            Builds two train ops, one for the Generator and one for the Discriminator. These can 
            be run independently any number of times, and each time will increase the global_step.

            Args:
                global_step: A Tensor to be incremented
            Returns:
                [ g_train_op, d_train_op ]
        '''
        if not self.model_built or not self.losses_built :
            raise RuntimeError( "Cannot build optimizers until 'build_model' ({0}) and 'get_losses' {1} are run".format(
                    self.model_built, self.losses_built ) )
        self.global_step = global_step
        self.global_step_copy = tf.identity( global_step, name='global_step_copy' )

        t_vars = tf.trainable_variables()
    
        # Create the optimizer train_op for the generator
        self.g_optimizer = optimize.build_optimizer( global_step=self.global_step, cfg=self.cfg )
        self.g_vars = slim.get_variables( scope='encoder', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.g_vars += slim.get_variables( scope='decoder', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.g_train_op = optimize.create_train_op( self.loss_g_total, self.g_optimizer, 
                    variables_to_train=self.g_vars, update_global_step=True )
        self.g_lnorm_op = optimize.create_train_op( self.softmax_loss, self.g_optimizer, 
                    variables_to_train=self.g_vars, update_global_step=True )


        # Create a train_op for the discriminator
        if 'discriminator_learning_args' in self.cfg: # use these
            discriminator_learning_args = self.cfg[ 'discriminator_learning_args' ]
        else:
            discriminator_learning_args = self.cfg 
        self.d_optimizer = optimize.build_optimizer( global_step=self.global_step, cfg=discriminator_learning_args )
        self.d_vars = slim.get_variables( scope='discriminator', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.d_vars += slim.get_variables( scope='discriminator_1', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.d_train_op = slim.learning.create_train_op( self.loss_d_total, self.d_optimizer, 
                    variables_to_train=self.d_vars )

        self.train_op = [ self.g_train_op, self.d_train_op, self.g_lnorm_op, self.accuracy]
        self.train_op_built = True
        return self.train_op 
Example #11
Source File: encoder_decoder_cgan.py    From taskonomy with MIT License
def build_train_op( self, global_step ):
        '''
            Builds two train ops, one for the Generator and one for the Discriminator. These can 
            be run independently any number of times, and each time will increase the global_step.

            Args:
                global_step: A Tensor to be incremented
            Returns:
                [ g_train_op, d_train_op ]
        '''
        if not self.model_built or not self.losses_built :
            raise RuntimeError( "Cannot build optimizers until 'build_model' ({0}) and 'get_losses' {1} are run".format(
                    self.model_built, self.losses_built ) )
        self.global_step = global_step
        self.global_step_copy = tf.identity( global_step, name='global_step_copy' )

        t_vars = tf.trainable_variables()
    
        # Create the optimizer train_op for the generator
        self.g_optimizer = optimize.build_optimizer( global_step=self.global_step, cfg=self.cfg )
        self.g_vars = slim.get_variables( scope='encoder', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.g_vars += slim.get_variables( scope='decoder', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.g_train_op = optimize.create_train_op( self.loss_g_total, self.g_optimizer, 
                    variables_to_train=self.g_vars, update_global_step=True )

        self.g_lnorm_op = optimize.create_train_op( self.l1_loss, self.g_optimizer, 
                    variables_to_train=self.g_vars, update_global_step=True )

        # Create a train_op for the discriminator
        if 'discriminator_learning_args' in self.cfg: # use these
            discriminator_learning_args = self.cfg[ 'discriminator_learning_args' ]
        else:
            discriminator_learning_args = self.cfg 
        self.d_optimizer = optimize.build_optimizer( global_step=self.global_step, cfg=discriminator_learning_args )
        self.d_vars = slim.get_variables( scope='discriminator', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.d_vars += slim.get_variables( scope='discriminator_1', collection=tf.GraphKeys.TRAINABLE_VARIABLES )
        self.d_train_op = slim.learning.create_train_op( self.loss_d_total, self.d_optimizer, 
                    variables_to_train=self.d_vars )

        self.train_op = [ self.g_train_op, self.d_train_op, self.g_lnorm_op ]
        self.train_op_built = True
        return self.train_op