Python utils.load_model() Examples

The following are 17 code examples of utils.load_model(), collected from open-source projects. The project, source file, and license for each example are noted in the header above it. You may also want to check out the other available functions and classes of the utils module.
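Because each project ships its own utils.load_model, the signatures below vary: some restore weights into an already-constructed PyTorch module, while others rebuild a serialized Keras or MXNet model from a directory or checkpoint prefix. As a rough orientation, here is a minimal sketch of the PyTorch-style variant; the checkpoint keys and argument names are assumptions for illustration, not any one project's actual code.

import torch

def load_model(model, optimizer, path):
    # Sketch only: restore a checkpoint saved as a dict of state_dicts.
    checkpoint = torch.load(path, map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])  # key name assumed
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    return checkpoint  # callers may read bookkeeping fields such as 'step'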
Example #1
Source File: predict.py    From Wave-U-Net-Pytorch with MIT License
def main(args):
    # MODEL
    num_features = [args.features*i for i in range(1, args.levels+1)] if args.feature_growth == "add" else \
                   [args.features*2**i for i in range(0, args.levels)]
    target_outputs = int(args.output_size * args.sr)
    model = Waveunet(args.channels, num_features, args.channels, args.instruments, kernel_size=args.kernel_size,
                     target_output_size=target_outputs, depth=args.depth, strides=args.strides,
                     conv_type=args.conv_type, res=args.res, separate=args.separate)

    if args.cuda:
        model = utils.DataParallel(model)
        print("move model to gpu")
        model.cuda()

    print("Loading model from checkpoint " + str(args.load_model))
    state = utils.load_model(model, None, args.load_model)
    print('Step', state['step'])

    preds = predict_song(args, args.input, model)

    output_folder = os.path.dirname(args.input) if args.output is None else args.output
    for inst in preds.keys():
        utils.write_wav(os.path.join(output_folder, os.path.basename(args.input) + "_" + inst + ".wav"), preds[inst], args.sr) 
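Note that Example #1 wraps the model in utils.DataParallel before restoring the checkpoint. When a checkpoint saved from a bare module is loaded into a DataParallel wrapper (or the reverse), the 'module.' prefix that DataParallel adds to parameter names has to be reconciled. A hedged sketch of the usual workaround, assuming the checkpoint stores its weights under a 'model_state_dict' key:

state = torch.load(checkpoint_path, map_location='cpu')
model_state = state['model_state_dict']  # key name assumed
# Strip the 'module.' prefix that nn.DataParallel adds to parameter names.
model_state = {k[len('module.'):] if k.startswith('module.') else k: v
               for k, v in model_state.items()}
model.load_state_dict(model_state)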
Example #2
Source File: visualize.py    From ba-dls-deepspeech with Apache License 2.0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('test_file', type=str,
                        help='Path to an audio file')
    parser.add_argument('train_desc_file', type=str,
                        help='Path to the training JSON-line file. This will '
                             'be used to extract feature means/variance')
    parser.add_argument('load_dir', type=str,
                        help='Directory where a trained model is stored.')
    parser.add_argument('--weights_file', type=str, default=None,
                        help='Path to a model weights file')
    args = parser.parse_args()

    print ("Loading model")
    model = load_model(args.load_dir, args.weights_file)
    visualize(model, args.test_file, args.train_desc_file) 
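Unlike the PyTorch examples, this load_model receives a directory plus an optional weights file and must rebuild the network itself. A plausible minimal sketch of such a directory-based Keras loader (the file names are assumptions, not the project's actual layout):

import os
from keras.models import model_from_json

def load_model(load_dir, weights_file=None):
    # Sketch only: rebuild the architecture from JSON, then attach weights.
    with open(os.path.join(load_dir, 'model_config.json')) as f:  # name assumed
        model = model_from_json(f.read())
    if weights_file is None:
        weights_file = os.path.join(load_dir, 'model_weights.h5')  # name assumed
    model.load_weights(weights_file)
    return model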
Example #3
Source File: interpolation_in_text.py    From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License
def infer(data_filepath='data/flowers.hdf5', z_dim=128, out_dir='gan',
          n_steps=10):

    G = load_model(out_dir)
    val_data = get_data(data_filepath, 'train')
    val_data = next(iterate_minibatches(val_data, 2))
    emb_source, emb_target = val_data[1]
    txts = val_data[2]

    z = np.random.uniform(-1, 1, size=(1, z_dim))

    G.trainable = False
    for i in range(n_steps+1):
        p = i/float(n_steps)
        emb = emb_source * (1-p) + emb_target * p
        emb = emb[None, :]
        fake_image = G.predict([z, emb])[0]
        img = ((fake_image + 1)*0.5)
        plt.imsave("{}/fake_text_interpolation_i{}".format(out_dir, i), img)
        print(i, str(txts[int(round(p))]).strip(),
              file=open("{}/fake_text_interpolation.txt".format(out_dir), "a")) 
Example #4
Source File: inference.py    From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License
def infer(data_filepath='data/flowers.hdf5', z_dim=128, out_dir='gan',
          n_samples=5):

    G = load_model(out_dir)
    val_data = get_data(data_filepath, 'train')
    val_data = next(iterate_minibatches(val_data, n_samples))    
    emb, txts = val_data[1], val_data[2]

    # sample z vector for inference
    z = np.random.uniform(-1, 1, size=(n_samples, z_dim))

    G.trainable = False
    fake_images = G.predict([z, emb])
    for i in range(n_samples):
        img = ((fake_images[i] + 1)*0.5)
        plt.imsave("{}/fake_{}".format(out_dir, i), img)
        print(i, str(txts[i]).strip(),
              file=open("{}/fake_text.txt".format(out_dir), "a")) 
Example #5
Source File: test.py    From ba-dls-deepspeech with Apache License 2.0
def main(test_desc_file, train_desc_file, load_dir):
    # Prepare the data generator
    datagen = DataGenerator()
    # Load the JSON file that contains the dataset
    datagen.load_test_data(test_desc_file)
    datagen.load_train_data(train_desc_file)
    # Use a few samples from the dataset, to calculate the means and variance
    # of the features, so that we can center our inputs to the network
    datagen.fit_train(100)

    # Compile a Recurrent Network with 1 1D convolution layer, GRU units
    # and 1 fully connected layer
    model = load_model(load_dir)

    # Compile the testing function
    test_fn = compile_test_fn(model)

    # Test the model
    test_loss = test(model, test_fn, datagen)
    print ("Test loss: {}".format(test_loss)) 
Example #6
Source File: acc_conv.py    From SNIPER-mxnet with Apache License 2.0
def main():
  model = utils.load_model(args)
  new_model = conv_vh_decomposition(model, args)
  new_model.save(args.save_model) 
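Examples #6-#8 and #12 all come from MXNet model-compression tools, where utils.load_model(args) typically resolves a checkpoint prefix and epoch from the parsed command-line arguments. A hedged sketch using the classic FeedForward API (the argument names are assumptions):

import mxnet as mx

def load_model(args):
    # Sketch only: load an old-style MXNet FeedForward checkpoint saved as
    # <prefix>-symbol.json plus <prefix>-<epoch>.params.
    return mx.model.FeedForward.load(args.model, args.load_epoch)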
Example #7
Source File: acc_fc.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def main():
  model = utils.load_model(args)
  new_model = fc_decomposition(model, args)
  new_model.save(args.save_model) 
Example #8
Source File: acc_fc.py    From SNIPER-mxnet with Apache License 2.0
def main():
  model = utils.load_model(args)
  new_model = fc_decomposition(model, args)
  new_model.save(args.save_model) 
Example #9
Source File: hooks.py    From pycodesuggest with MIT License
def __call__(self, sess, epoch, iteration, model, loss, processed):
    if epoch == self.at_epoch:
        print("Loading model...")
        model = load_model(sess, self.path + "latest/")
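Here load_model restores TensorFlow variables into a live session from a checkpoint directory. A minimal TF 1.x-style sketch (the checkpoint layout is assumed, and whether the helper returns a model object or simply mutates the session is project-specific):

import tensorflow as tf

def load_model(sess, checkpoint_dir):
    # Sketch only: restore the most recent checkpoint into the session.
    saver = tf.train.Saver()
    ckpt = tf.train.latest_checkpoint(checkpoint_dir)
    saver.restore(sess, ckpt)
    return sess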
Example #10
Source File: train.py    From glad with BSD 3-Clause "New" or "Revised" License
def run(args):
    pprint(args)
    logging.basicConfig(level=logging.INFO)

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    seed(args.seed)

    dataset, ontology, vocab, Eword = load_dataset()

    model = load_model(args.model, args, ontology, vocab)
    model.save_config()
    model.load_emb(Eword)

    model = model.to(model.device)
    if not args.test:
        logging.info('Starting train')
        model.run_train(dataset['train'], dataset['dev'], args)
    if args.resume:
        model.load_best_save(directory=args.resume)
    else:
        model.load_best_save(directory=args.dout)
    model = model.to(model.device)
    logging.info('Running dev evaluation')
    dev_out = model.run_eval(dataset['dev'], args)
    pprint(dev_out) 
Example #11
Source File: models.py    From punctuator with MIT License
def load(self, model):
    self.t_lstm = load_model(model["t_lstm_file_name"])
    self.in_vocabulary = self.t_lstm.in_vocabulary
    super(TA_LSTM, self).load(model)
Example #12
Source File: acc_conv.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def main():
  model = utils.load_model(args)
  new_model = conv_vh_decomposition(model, args)
  new_model.save(args.save_model) 
Example #13
Source File: arithmetic_in_text.py    From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License
def infer(data_filepath='data/flowers.hdf5', z_dim=128, out_dir='gan',
          n_steps=10):

    G = load_model(out_dir)
    val_data = get_data(data_filepath, 'train')
    val_data = next(iterate_minibatches(val_data, 2))
    emb_a, emb_b = val_data[1]
    txts = val_data[2]

    # add batch dimension
    emb_a, emb_b = emb_a[None, :], emb_b[None, :]

    # sample z vector for inference
    z = np.random.uniform(-1, 1, size=(1, z_dim))

    G.trainable = False

    # add and subtract
    emb_add = (emb_a + emb_b)
    emb_a_sub_b = (emb_a - emb_b)
    emb_b_sub_a = (emb_b - emb_a)

    # generate images
    fake_a = G.predict([z, emb_a])[0]
    fake_b = G.predict([z, emb_b])[0]
    fake_add = G.predict([z, emb_add])[0]
    fake_a_sub_b = G.predict([z, emb_a_sub_b])[0]
    fake_b_sub_a = G.predict([z, emb_b_sub_a])[0]

    fake_a = ((fake_a + 1)*0.5)
    fake_b = ((fake_b + 1)*0.5)
    fake_add = ((fake_add + 1)*0.5)
    fake_a_sub_b = ((fake_a_sub_b + 1)*0.5)
    fake_b_sub_a = ((fake_b_sub_a + 1)*0.5)

    plt.imsave("{}/fake_text_arithmetic_a".format(out_dir), fake_a)
    plt.imsave("{}/fake_text_arithmetic_b".format(out_dir), fake_b)
    plt.imsave("{}/fake_text_arithmetic_add".format(out_dir), fake_add)
    plt.imsave("{}/fake_text_arithmetic_a_sub_b".format(out_dir), fake_a_sub_b)
    plt.imsave("{}/fake_text_arithmetic_b_sub_a".format(out_dir), fake_b_sub_a)
    print(str(txts[0]), str(txts[1]),
          file=open("{}/fake_text_arithmetic.txt".format(out_dir), "a")) 
Example #14
Source File: main.py    From Bert-BiLSTM-CRF-pytorch with MIT License
def train(**kwargs):
    config = Config()
    config.update(**kwargs)
    print('Current settings:\n', config)
    if config.use_cuda:
        torch.cuda.set_device(config.gpu)
    print('loading corpus')
    vocab = load_vocab(config.vocab)
    label_dic = load_vocab(config.label_file)
    tagset_size = len(label_dic)
    train_data = read_corpus(config.train_file, max_length=config.max_length, label_dic=label_dic, vocab=vocab)
    dev_data = read_corpus(config.dev_file, max_length=config.max_length, label_dic=label_dic, vocab=vocab)

    train_ids = torch.LongTensor([temp.input_id for temp in train_data])
    train_masks = torch.LongTensor([temp.input_mask for temp in train_data])
    train_tags = torch.LongTensor([temp.label_id for temp in train_data])

    train_dataset = TensorDataset(train_ids, train_masks, train_tags)
    train_loader = DataLoader(train_dataset, shuffle=True, batch_size=config.batch_size)

    dev_ids = torch.LongTensor([temp.input_id for temp in dev_data])
    dev_masks = torch.LongTensor([temp.input_mask for temp in dev_data])
    dev_tags = torch.LongTensor([temp.label_id for temp in dev_data])

    dev_dataset = TensorDataset(dev_ids, dev_masks, dev_tags)
    dev_loader = DataLoader(dev_dataset, shuffle=True, batch_size=config.batch_size)
    model = BERT_LSTM_CRF(config.bert_path, tagset_size, config.bert_embedding, config.rnn_hidden, config.rnn_layer, dropout_ratio=config.dropout_ratio, dropout1=config.dropout1, use_cuda=config.use_cuda)
    if config.load_model:
        assert config.load_path is not None
        model = load_model(model, name=config.load_path)
    if config.use_cuda:
        model.cuda()
    model.train()
    optimizer = getattr(optim, config.optim)
    optimizer = optimizer(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
    eval_loss = 10000
    for epoch in range(config.base_epoch):
        step = 0
        for i, batch in enumerate(train_loader):
            step += 1
            model.zero_grad()
            inputs, masks, tags = batch
            inputs, masks, tags = Variable(inputs), Variable(masks), Variable(tags)
            if config.use_cuda:
                inputs, masks, tags = inputs.cuda(), masks.cuda(), tags.cuda()
            feats = model(inputs, masks)
            loss = model.loss(feats, masks, tags)
            loss.backward()
            optimizer.step()
            if step % 50 == 0:
                print('step: {} |  epoch: {}|  loss: {}'.format(step, epoch, loss.item()))
        loss_temp = dev(model, dev_loader, epoch, config)
        if loss_temp < eval_loss:
            eval_loss = loss_temp
            save_model(model, epoch) 
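The load_model/save_model pair in this example implements best-model checkpointing: the model is saved whenever dev loss improves and restored by path before resuming. A hedged sketch of what such a pair commonly looks like (the path and key names are assumptions):

import torch

def save_model(model, epoch, path='result/best_model.pth'):  # path assumed
    # Sketch only: persist the state_dict, keeping the epoch for bookkeeping.
    torch.save({'epoch': epoch, 'state_dict': model.state_dict()}, path)

def load_model(model, name):
    # Sketch only: restore weights saved by save_model above.
    checkpoint = torch.load(name, map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])
    return model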
Example #15
Source File: visualize.py    From FactorNet with MIT License
def main():
    """
    The main executable function
    """
    parser = make_argument_parser()
    args = parser.parse_args()

    input_dir = args.inputdir
    model_dir = args.modeldir
    bed_file = args.bed
    chrom = args.chrom

    if args.outputdir is None:
        clobber = True
        output_dir = args.outputdirc
    else:
        clobber = False
        output_dir = args.outputdir

    try:  # adapted from dreme.py by T. Bailey
        os.makedirs(output_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            if not clobber:
                print(('output directory (%s) already exists '
                       'but you specified not to clobber it') % output_dir,
                      file=sys.stderr)
                sys.exit(1)
            else:
                print(('output directory (%s) already exists '
                       'so it will be clobbered') % output_dir, file=sys.stderr)

    print('Loading genome')
    genome = utils.load_genome()
    print('Loading model')
    model_tfs, model_bigwig_names, features, model = utils.load_model(model_dir)
    L = model.input_shape[0][1]
    utils.L = L
    use_meta = 'meta' in features
    use_gencode = 'gencode' in features
    print('Loading BED data')
    is_sorted = False
    bigwig_names, meta_names, datagen_bed, nonblacklist_bools = utils.load_beddata(genome, bed_file, use_meta, use_gencode, input_dir, is_sorted, chrom)
    assert bigwig_names == model_bigwig_names
    if use_meta:
        model_meta_file = model_dir + '/meta.txt'
        assert os.path.isfile(model_meta_file)
        model_meta_names = np.loadtxt(model_meta_file, dtype=str)
        if len(model_meta_names.shape) == 0:
            model_meta_names = [str(model_meta_names)]
        else:
            model_meta_names = list(model_meta_names)
        assert meta_names == model_meta_names
    output_results(bigwig_names, datagen_bed, model, output_dir) 
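FactorNet's utils.load_model(model_dir) returns model metadata alongside the Keras model itself, and the callers in Examples #15 and #16 assert that this metadata matches the input data. A hedged sketch of a loader with that four-value contract (every file name here is an assumption):

import os
from keras.models import model_from_json

def load_model(model_dir):
    # Sketch only: plain-text metadata files plus a serialized Keras model.
    with open(os.path.join(model_dir, 'tfs.txt')) as f:       # name assumed
        model_tfs = f.read().split()
    with open(os.path.join(model_dir, 'bigwigs.txt')) as f:   # name assumed
        model_bigwig_names = f.read().split()
    with open(os.path.join(model_dir, 'features.txt')) as f:  # name assumed
        features = f.read().split()
    with open(os.path.join(model_dir, 'model.json')) as f:    # name assumed
        model = model_from_json(f.read())
    model.load_weights(os.path.join(model_dir, 'best_model.hdf5'))  # name assumed
    return model_tfs, model_bigwig_names, features, model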
Example #16
Source File: predict.py    From FactorNet with MIT License
def main():
    """
    The main executable function
    """
    parser = make_argument_parser()
    args = parser.parse_args()

    input_dir = args.inputdir
    model_dir = args.modeldir
    tf = args.factor
    bed_file = args.bed
    output_file = args.outputfile

    print('Loading genome')
    genome = utils.load_genome()
    print('Loading model')
    model_tfs, model_bigwig_names, features, model = utils.load_model(model_dir)
    L = model.input_shape[0][1]
    utils.L = L
    assert tf in model_tfs
    assert 'bigwig' in features
    use_meta = 'meta' in features
    use_gencode = 'gencode' in features
    print('Loading test data')
    is_sorted = True
    bigwig_names, meta_names, datagen_bed, nonblacklist_bools = utils.load_beddata(genome, bed_file, use_meta, use_gencode, input_dir, is_sorted)
    assert bigwig_names == model_bigwig_names
    if use_meta:
        model_meta_file = model_dir + '/meta.txt'
        assert os.path.isfile(model_meta_file)
        model_meta_names = np.loadtxt(model_meta_file, dtype=str)
        if len(model_meta_names.shape) == 0:
            model_meta_names = [str(model_meta_names)]
        else:
            model_meta_names = list(model_meta_names)
        assert meta_names == model_meta_names
    print('Generating predictions')
    model_tf_index = model_tfs.index(tf)
    model_predicts = model.predict_generator(datagen_bed, val_samples=len(datagen_bed), pickle_safe=True)
    if len(model_tfs) > 1:
        model_tf_predicts = model_predicts[:, model_tf_index]
    else:
        model_tf_predicts = model_predicts
    final_scores = np.zeros(len(nonblacklist_bools))
    final_scores[nonblacklist_bools] = model_tf_predicts
    print('Saving predictions')
    df = pandas.read_csv(bed_file, sep='\t', header=None)
    df[3] = final_scores
    df.to_csv(output_file, sep='\t', compression='gzip', float_format='%.3e', header=False, index=False) 
Example #17
Source File: trainer.py    From punctuator with MIT License
def _train(net, training_data, validation_data, model_name, learning_rate, max_epochs, min_improvement):
    min_learning_rate = 1e-6
    best_validation_ppl = np.inf
    divide = False

    for epoch in range(1, max_epochs+1):
        
        epoch_start = time()
        
        print "\n======= EPOCH %s =======" % epoch
        print "\tLearning rate is %s" % learning_rate

        train_ppl = _process_corpus(net, training_data, mode='train', learning_rate=learning_rate) 
        print "\tTrain PPL is %.3f" % train_ppl

        validation_ppl = _process_corpus(net, validation_data, mode='test')
        print "\tValidation PPL is %.3f" % validation_ppl

        print "\tTime taken: %ds" % (time() - epoch_start)

        if np.log(validation_ppl) * min_improvement > np.log(best_validation_ppl):  # Mikolov's recipe
            if not divide:
                divide = True
                print "\tStarting to reduce the learning rate..."
                if validation_ppl > best_validation_ppl:
                    print "\tLoading best model."
                    net = utils.load_model("../out/" + model_name)
            else:
                if validation_ppl < best_validation_ppl:
                    print "\tSaving model."
                    net.save("../out/" + model_name, final=True)
                break
        else:
            print "\tNew best model! Saving..."
            best_validation_ppl = validation_ppl
            final = learning_rate / 2. < min_learning_rate or epoch == max_epochs
            net.save("../out/" + model_name, final)

        if divide:
            learning_rate /= 2.
        
        if learning_rate < min_learning_rate:
            break
            
    print "-"*30
    print "Finished training."
    print "Best validation PPL is %.3f\n\n" % best_validation_ppl