Python model.build_model() Examples

The following are 12 code examples of model.build_model(), collected from open-source projects. Each example lists its source file, originating project, and license, so you can follow up with the original source. You may also want to check out all available functions/classes of the module model.
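As the examples below show, the signature of build_model() varies by project: some versions take no arguments and return a framework model object, while others take a config object, input tensors, or a class count. A minimal sketch of the zero-argument PyTorch flavor, purely illustrative (the architecture and layer sizes are assumptions, not taken from any project below):

import torch.nn as nn

def build_model():
    # Illustrative stand-in for a project-specific model factory.
    return nn.Sequential(
        nn.Linear(128, 64),
        nn.ReLU(),
        nn.Linear(64, 10),
    )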
Example #1
Source File: main.py    From reid_baseline_with_syncbn with MIT License
def train(args):
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if args.config_file != "":
        # copy the training config next to the outputs only when one was given
        shutil.copy(args.config_file, cfg.OUTPUT_DIR)

    num_gpus = torch.cuda.device_count()

    logger = setup_logger('reid_baseline', output_dir, 0)
    logger.info('Using {} GPUs'.format(num_gpus))
    logger.info(args)
    logger.info('Running with config:\n{}'.format(cfg))

    train_dl, val_dl, num_query, num_classes = make_dataloader(cfg, num_gpus) 

    model = build_model(cfg, num_classes)

    loss_func = make_loss(cfg, num_classes)

    trainer = BaseTrainer(cfg, model, train_dl, val_dl,
                          loss_func, num_query, num_gpus)

    for epoch in range(trainer.epochs):
        for batch in trainer.train_dl:
            trainer.step(batch)
            trainer.handle_new_batch()
        trainer.handle_new_epoch() 
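A sketch of how train() might be invoked; the argparse flags below are assumptions inferred from how args is used above (args.config_file and args.opts), not part of the original example:

import argparse

parser = argparse.ArgumentParser(description='ReID baseline training')
parser.add_argument('--config_file', default='', type=str)
parser.add_argument('opts', nargs=argparse.REMAINDER)  # trailing KEY VALUE config overrides
train(parser.parse_args())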
Example #2
Source File: train.py    From WaveRNN-Pytorch with MIT License
def test_eval():
    data_root = "data_dir"
    dataset = AudiobookDataset(data_root)
    if hp.input_type == 'raw':
        collate_fn = raw_collate
    elif hp.input_type == 'bits':
        collate_fn = discrete_collate
    else:
        raise ValueError("input_type:{} not supported".format(hp.input_type))
    data_loader = DataLoader(dataset, collate_fn=collate_fn, shuffle=True, num_workers=0, batch_size=hp.batch_size)
    device = torch.device("cuda" if use_cuda else "cpu")
    print("using device:{}".format(device))

    # build model, create optimizer
    model = build_model().to(device)

    evaluate_model(model, data_loader) 
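test_eval() relies on names defined at module scope in train.py (hp, use_cuda, raw_collate, discrete_collate, evaluate_model). A sketch of the two simplest of these, assuming the project's usual hyperparameter module:

import torch
from hparams import hparams as hp  # import path is an assumption

use_cuda = torch.cuda.is_available()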
Example #3
Source File: test_cifar.py    From NAO with GNU General Public License v3.0
def get_test_ops(x, y, params, reuse=False):
  with tf.device('/gpu:0'):
    inputs = tf.reshape(x, [-1, _HEIGHT, _WIDTH, _DEPTH])
    labels = y
    res = model.build_model(inputs, params, False, reuse)
    logits = res['logits']
    cross_entropy = tf.losses.softmax_cross_entropy(
      logits=logits, onehot_labels=labels)
    # Add weight decay to the loss.
    loss = cross_entropy + params['weight_decay'] * tf.add_n(
      [tf.nn.l2_loss(v) for v in tf.trainable_variables()])

    if 'aux_logits' in res:
      aux_logits = res['aux_logits']
      aux_loss = tf.losses.softmax_cross_entropy(
        logits=aux_logits, onehot_labels=labels, weights=params['aux_head_weight'])
      loss += aux_loss

    predictions = tf.argmax(logits, axis=1)
    labels = tf.argmax(y, axis=1)
    test_accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, labels), dtype=tf.float32))
    return loss, test_accuracy 
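Examples #3-#5 all thread a params dict through to model.build_model() and the loss. The keys below are inferred from the usage above; the values are illustrative assumptions only. Note that tf.nn.l2_loss(v) computes sum(v ** 2) / 2, so the decay term is a standard L2 penalty over all trainable variables:

params = {
    'weight_decay': 5e-4,    # coefficient on the summed L2 penalty (value assumed)
    'aux_head_weight': 0.4,  # weight of the auxiliary classifier loss (value assumed)
}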
Example #4
Source File: train_cifar.py    From NAO with GNU General Public License v3.0
def get_valid_ops(x, y, params, reuse=False):
  with tf.device('/gpu:0'):
    inputs = tf.reshape(x, [-1, _HEIGHT, _WIDTH, _DEPTH])
    labels = y
    res = model.build_model(inputs, params, False, reuse)
    logits = res['logits']
    cross_entropy = tf.losses.softmax_cross_entropy(
      logits=logits, onehot_labels=labels)
    # Add weight decay to the loss.
    loss = cross_entropy + params['weight_decay'] * tf.add_n(
      [tf.nn.l2_loss(v) for v in tf.trainable_variables()])

    if 'aux_logits' in res:
      aux_logits = res['aux_logits']
      aux_loss = tf.losses.softmax_cross_entropy(
        logits=aux_logits, onehot_labels=labels, weights=params['aux_head_weight'])
      loss += aux_loss
    predictions = tf.argmax(logits, axis=1)
    labels = tf.argmax(y, axis=1)
    valid_accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, labels), dtype=tf.float32))
    return loss, valid_accuracy 
Example #5
Source File: train_cifar.py    From NAO with GNU General Public License v3.0
def get_test_ops(x, y, params, reuse=False):
  with tf.device('/gpu:0'):
    inputs = tf.reshape(x, [-1, _HEIGHT, _WIDTH, _DEPTH])
    labels = y
    res = model.build_model(inputs, params, False, reuse)
    logits = res['logits']
    cross_entropy = tf.losses.softmax_cross_entropy(
      logits=logits, onehot_labels=labels)
    # Add weight decay to the loss.
    loss = cross_entropy + params['weight_decay'] * tf.add_n(
      [tf.nn.l2_loss(v) for v in tf.trainable_variables()])

    if 'aux_logits' in res:
      aux_logits = res['aux_logits']
      aux_loss = tf.losses.softmax_cross_entropy(
        logits=aux_logits, onehot_labels=labels, weights=params['aux_head_weight'])
      loss += aux_loss

    predictions = tf.argmax(logits, axis=1)
    labels = tf.argmax(y, axis=1)
    test_accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, labels), dtype=tf.float32))
    return loss, test_accuracy 
Example #6
Source File: plot.py    From adversarial-autoencoder with MIT License
def plot_autoencoder(weightsfile):
    print('building model')
    layers = model.build_model()

    batch_size = 128

    print('compiling theano function')
    encoder_func = theano_funcs.create_encoder_func(layers)

    print('loading weights from %s' % (weightsfile))
    model.load_weights([
        layers['l_decoder_out'],
        layers['l_discriminator_out'],
    ], weightsfile)

    print('loading data')
    X_train, y_train, X_test, y_test = utils.load_mnist()

    train_datapoints = []
    print('transforming training data')
    for train_idx in get_batch_idx(X_train.shape[0], batch_size):
        X_train_batch = X_train[train_idx]
        train_batch_codes = encoder_func(X_train_batch)
        train_datapoints.append(train_batch_codes)

    test_datapoints = []
    print('transforming test data')
    for test_idx in get_batch_idx(X_test.shape[0], batch_size):
        X_test_batch = X_test[test_idx]
        test_batch_codes = encoder_func(X_test_batch)
        test_datapoints.append(test_batch_codes)

    Z_train = np.vstack(train_datapoints)
    Z_test = np.vstack(test_datapoints)

    plot(Z_train, y_train, Z_test, y_test,
         filename='adversarial_train_val.png',
         title='projected onto latent space of autoencoder') 
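get_batch_idx is a project helper used here and in Example #10; a minimal sketch of the behavior both loops assume, namely yielding index objects that cover the first dimension in consecutive batches:

def get_batch_idx(n, batch_size):
    # Yield slices covering range(n) in chunks of at most batch_size.
    for start in range(0, n, batch_size):
        yield slice(start, min(start + batch_size, n))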
Example #7
Source File: predict.py    From facial-expression-recognition-using-cnn with GNU General Public License v3.0
def load_model():
    model = None
    with tf.Graph().as_default():
        print( "loading pretrained model...")
        network = build_model()
        model = DNN(network)
        if os.path.isfile(TRAINING.save_model_path):
            model.load(TRAINING.save_model_path)
        else:
            print( "Error: file '{}' not found".format(TRAINING.save_model_path))
    return model 
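A sketch of calling the loader; note that load_model() returns the DNN wrapper even when no weight file is found, so it is up to the caller to heed the printed error. face_batch below is an assumption, standing in for a preprocessed batch shaped for the network:

model = load_model()
face_batch = ...  # assumed: a preprocessed image batch matching the network's input shape
predictions = model.predict(face_batch)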
Example #8
Source File: train.py    From WaveRNN-Pytorch with MIT License
def test_save_checkpoint():
    checkpoint_path = "checkpoints/"
    device = torch.device("cuda" if use_cuda else "cpu")
    model = build_model()
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    global global_step, global_epoch, global_test_step
    save_checkpoint(device, model, optimizer, global_step, checkpoint_path, global_epoch)

    model = load_checkpoint(checkpoint_path+"checkpoint_step000000000.pth", model, optimizer, False) 
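test_save_checkpoint() declares several counters as global; they live at module scope in train.py. A sketch of their likely initialization:

global_step = 0
global_epoch = 0
global_test_step = 0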
Example #9
Source File: tracker.py    From MemTrack with MIT License
def __init__(self, sess, checkpoint_dir=None):

        self.z_file_init = tf.placeholder(tf.string, [], name='z_filename_init')
        self.z_roi_init = tf.placeholder(tf.float32, [1, 4], name='z_roi_init')
        self.z_file = tf.placeholder(tf.string, [], name='z_filename')
        self.z_roi = tf.placeholder(tf.float32, [1, 4], name='z_roi')
        self.x_file = tf.placeholder(tf.string, [], name='x_filename')
        self.x_roi = tf.placeholder(tf.float32, [config.num_scale, 4], name='x_roi')

        init_z_exemplar, _ = self._read_and_crop_image(self.z_file_init, self.z_roi_init, [config.z_exemplar_size, config.z_exemplar_size])
        init_z_exemplar = tf.reshape(init_z_exemplar, [1, 1, config.z_exemplar_size, config.z_exemplar_size, 3])
        init_z_exemplar = tf.tile(init_z_exemplar, [config.num_scale, 1, 1, 1, 1])
        z_exemplar, _ = self._read_and_crop_image(self.z_file, self.z_roi, [config.z_exemplar_size, config.z_exemplar_size])
        z_exemplar = tf.reshape(z_exemplar, [1, 1, config.z_exemplar_size, config.z_exemplar_size, 3])
        z_exemplar = tf.tile(z_exemplar, [config.num_scale, 1, 1, 1, 1])
        self.x_instances, self.image = self._read_and_crop_image(self.x_file, self.x_roi, [config.x_instance_size, config.x_instance_size])
        self.x_instances = tf.reshape(self.x_instances, [config.num_scale, 1, config.x_instance_size, config.x_instance_size, 3])

        with tf.variable_scope('mann'):
            mem_cell = MemNet(config.hidden_size, config.memory_size, config.slot_size, False)

        self.initial_state = build_initial_state(init_z_exemplar, mem_cell, ModeKeys.PREDICT)
        self.response, saver, self.final_state = build_model(z_exemplar, self.x_instances, mem_cell, self.initial_state, ModeKeys.PREDICT)
        self.att_score = mem_cell.att_score

        up_response_size = config.response_size * config.response_up
        self.up_response = tf.squeeze(tf.image.resize_images(tf.expand_dims(self.response, -1),
                                                             [up_response_size, up_response_size],
                                                             method=tf.image.ResizeMethod.BICUBIC,
                                                             align_corners=True), -1)
        if checkpoint_dir is not None:
            saver.restore(sess, checkpoint_dir)
            self._sess = sess
        else:
            ckpt = tf.train.get_checkpoint_state(config.checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                self._sess = sess 
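A sketch of constructing this tracker inside a TF1 session; the class name Tracker is an assumption based on the file name, and passing checkpoint_dir=None falls back to config.checkpoint_dir as the code above shows:

import tensorflow as tf

with tf.Session() as sess:
    tracker = Tracker(sess, checkpoint_dir=None)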
Example #10
Source File: plot.py    From adversarial-autoencoder with MIT License
def plot_latent_space(weightsfile):
    print('building model')
    layers = model.build_model()
    batch_size = 128
    decoder_func = theano_funcs.create_decoder_func(layers)

    print('loading weights from %s' % (weightsfile))
    model.load_weights([
        layers['l_decoder_out'],
        layers['l_discriminator_out'],
    ], weightsfile)

    # regularly-spaced grid of points sampled from p(z)
    Z = np.mgrid[2:-2.2:-0.2, -2:2.2:0.2].reshape(2, -1).T[:, ::-1].astype(np.float32)

    reconstructions = []
    print('generating samples')
    for idx in get_batch_idx(Z.shape[0], batch_size):
        Z_batch = Z[idx]
        X_batch = decoder_func(Z_batch)
        reconstructions.append(X_batch)

    X = np.vstack(reconstructions)
    X = X.reshape(X.shape[0], 28, 28)

    fig = plt.figure(1, (12., 12.))
    ax1 = plt.axes(frameon=False)
    ax1.get_xaxis().set_visible(False)
    ax1.get_yaxis().set_visible(False)
    plt.title('samples generated from latent space of autoencoder')
    grid = ImageGrid(
        fig, 111, nrows_ncols=(21, 21),
        share_all=True)

    print('plotting latent space')
    for i, x in enumerate(X):
        img = (x * 255).astype(np.uint8)
        grid[i].imshow(img, cmap='Greys_r')
        grid[i].get_xaxis().set_visible(False)
        grid[i].get_yaxis().set_visible(False)
        grid[i].set_frame_on(False)

    plt.savefig('latent_train_val.png', bbox_inches='tight') 
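The np.mgrid expression yields a 21 x 21 lattice of latent points, i.e. 441 rows of (z1, z2), which is why the ImageGrid above is sized nrows_ncols=(21, 21). A quick check:

import numpy as np

Z = np.mgrid[2:-2.2:-0.2, -2:2.2:0.2].reshape(2, -1).T[:, ::-1].astype(np.float32)
assert Z.shape == (441, 2)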
Example #11
Source File: inference.py    From FaceNet with Apache License 2.0
def run(self):
        # set environment
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpuid)
        print("InferenceWorker init, GPU ID: {}".format(self.gpuid))

        from model import build_model

        # load models
        model_weights_path = 'models/model.00-0.0296.hdf5'
        model = build_model()
        model.load_weights(model_weights_path)

        while True:
            try:
                try:
                    item = self.in_queue.get(block=False)
                except queue.Empty:
                    continue

                image_name_0, image_name_1, image_name_2 = item

                filename = os.path.join(image_folder, image_name_0)
                image_bgr = cv.imread(filename)
                image_bgr = cv.resize(image_bgr, (img_size, img_size), cv.INTER_CUBIC)
                image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                image_rgb_0 = preprocess_input(image_rgb)
                filename = os.path.join(image_folder, image_name_1)
                image_bgr = cv.imread(filename)
                image_bgr = cv.resize(image_bgr, (img_size, img_size), cv.INTER_CUBIC)
                image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                image_rgb_1 = preprocess_input(image_rgb)
                filename = os.path.join(image_folder, image_name_2)
                image_bgr = cv.imread(filename)
                image_bgr = cv.resize(image_bgr, (img_size, img_size), cv.INTER_CUBIC)
                image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                image_rgb_2 = preprocess_input(image_rgb)

                batch_inputs = np.empty((3, 1, img_size, img_size, 3), dtype=np.float32)
                batch_inputs[0] = image_rgb_0
                batch_inputs[1] = image_rgb_1
                batch_inputs[2] = image_rgb_2
                y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])

                a = y_pred[0, 0:128]
                p = y_pred[0, 128:256]
                n = y_pred[0, 256:384]

                self.out_queue.put({'image_name': image_name_0, 'embedding': a})
                self.out_queue.put({'image_name': image_name_1, 'embedding': p})
                self.out_queue.put({'image_name': image_name_2, 'embedding': n})
                if self.in_queue.qsize() == 0:
                    break
            except Exception as e:
                print(e)

        import keras.backend as K
        K.clear_session()
        print('InferenceWorker done, GPU ID {}'.format(self.gpuid)) 
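The slicing above implies the model emits one 384-dimensional vector per input row: the anchor, positive, and negative 128-d embeddings concatenated. The module-level names the worker relies on are sketched here with assumed values:

img_size = 224  # assumed; the real value comes from the project's config
image_folder = 'data/images'  # assumed path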
Example #12
Source File: train_eval.py    From FaceNet with Apache License 2.0
def run(self):
        # set environment
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpuid)
        print("InferenceWorker init, GPU ID: {}".format(self.gpuid))

        from model import build_model

        # load models
        model = build_model()
        model.load_weights(get_best_model())

        while True:
            try:
                sample = {}
                try:
                    sample['a'] = self.in_queue.get(block=False)
                    sample['p'] = self.in_queue.get(block=False)
                    sample['n'] = self.in_queue.get(block=False)
                except queue.Empty:
                    break

                batch_inputs = np.empty((3, 1, img_size, img_size, channel), dtype=np.float32)

                for j, role in enumerate(['a', 'p', 'n']):
                    image_name = sample[role]
                    filename = os.path.join(image_folder, image_name)
                    image_bgr = cv.imread(filename)
                    image_bgr = cv.resize(image_bgr, (img_size, img_size), cv.INTER_CUBIC)
                    image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                    batch_inputs[j, 0] = preprocess_input(image_rgb)

                y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])
                a = y_pred[0, 0:128]
                p = y_pred[0, 128:256]
                n = y_pred[0, 256:384]

                self.out_queue.put({'image_name': sample['a'], 'embedding': a})
                self.out_queue.put({'image_name': sample['p'], 'embedding': p})
                self.out_queue.put({'image_name': sample['n'], 'embedding': n})
                self.signal_queue.put(SENTINEL)

                if self.in_queue.qsize() == 0:
                    break
            except Exception as e:
                print(e)

        import keras.backend as K
        K.clear_session()
        print('InferenceWorker done, GPU ID {}'.format(self.gpuid))