Python data_loader.load_data() Examples
The following are 10 code examples of data_loader.load_data(), drawn from open-source projects. The project and source file for each example are listed above it, so you can consult the original code for full context. Note that data_loader is a project-local module in every case, so the exact signature and return value of load_data() differ from example to example.
Example #1
Source File: main.py from transferlearning (MIT License)
def load_data(src, tar, root_dir):
    folder_src = root_dir + src + '/images/'
    folder_tar = root_dir + tar + '/images/'
    source_loader = data_loader.load_data(
        folder_src, CFG['batch_size'], True, CFG['kwargs'])
    target_train_loader = data_loader.load_data(
        folder_tar, CFG['batch_size'], True, CFG['kwargs'])
    target_test_loader = data_loader.load_data(
        folder_tar, CFG['batch_size'], False, CFG['kwargs'])
    return source_loader, target_train_loader, target_test_loader
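A minimal usage sketch for the wrapper above. The CFG values, domain names, and data path below are illustrative only (not taken from the project), and the sketch assumes the returned objects are PyTorch-style iterable loaders yielding (images, labels) batches:

# Hedged sketch; CFG values, domain names, and paths are illustrative.
CFG = {'batch_size': 32, 'kwargs': {'num_workers': 4, 'pin_memory': True}}

# Calls the wrapper defined above, which builds one loader per domain/split.
source_loader, target_train_loader, target_test_loader = load_data(
    'amazon', 'webcam', '/data/office31/')

for images, labels in source_loader:   # assumes a PyTorch-style iterable loader
    pass                                # forward/backward pass would go here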
Example #2
Source File: main.py from Unsupervised-Attention-guided-Image-to-Image-Translation (MIT License)
def test(self):
    """Test Function."""
    print("Testing the results")

    self.inputs = data_loader.load_data(
        self._dataset_name, self._size_before_crop,
        False, self._do_flipping)

    self.model_setup()
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)

        chkpt_fname = tf.train.latest_checkpoint(self._checkpoint_dir)
        saver.restore(sess, chkpt_fname)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        self._num_imgs_to_save = cyclegan_datasets.DATASET_TO_SIZES[
            self._dataset_name]
        self.save_images_bis(sess, sess.run(self.global_step))

        coord.request_stop()
        coord.join(threads)
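Here load_data() wires up TensorFlow 1.x queue-based input tensors, so the queue runners must be started and stopped around the evaluation work. A stripped-down sketch of that lifecycle, using TensorFlow 1.x APIs with a placeholder loop body:

# Minimal sketch of the TF 1.x queue-runner lifecycle used above (placeholder body).
import tensorflow as tf

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        pass  # dequeue and process batches here, e.g. sess.run(inputs)
    finally:
        coord.request_stop()
        coord.join(threads)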
Example #3
Source File: conv.py from rmnist (MIT License)
def __init__(self, n=0, train=True, transform=None, expanded=False):
    self.n = n
    self.transform = transform
    td, vd, ts = data_loader.load_data(n, expanded=expanded)
    if train:
        self.data = td
    else:
        self.data = vd
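This constructor wraps the (training, validation, test) split returned by load_data() in what looks like a dataset class. A hedged sketch of how such a class might then be consumed with a PyTorch DataLoader; the class name RMNIST is a stand-in for illustration, and the sketch assumes the class also implements __len__ and __getitem__:

# Hedged sketch; `RMNIST` is a hypothetical name for the dataset class above.
import torch.utils.data

train_set = RMNIST(n=10, train=True)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)

for batch in train_loader:
    pass  # training step would go here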
Example #4
Source File: transfer.py from rmnist (MIT License)
def transfer(n):
    # `expanded` is presumably a flag defined at module scope in the original script
    td, vd, ts = data_loader.load_data(n, abstract=True, expanded=expanded)
    classifiers = [
        # sklearn.svm.SVC(),
        # sklearn.svm.SVC(kernel="linear", C=0.1),
        # sklearn.neighbors.KNeighborsClassifier(1),
        # sklearn.tree.DecisionTreeClassifier(),
        # sklearn.ensemble.RandomForestClassifier(max_depth=10, n_estimators=500, max_features=1),
        sklearn.neural_network.MLPClassifier(alpha=1.0, hidden_layer_sizes=(300,), max_iter=500)
    ]
    for clf in classifiers:
        clf.fit(td[0], td[1])
        # Python 3 form of the original Python 2 print statement
        print("\n{}: {}".format(type(clf).__name__, round(clf.score(vd[0], vd[1]) * 100, 2)))
Example #5
Source File: generate_abstract_features.py from rmnist (MIT License)
def __init__(self, n=0, train=True, transform=None, expanded=False):
    self.n = n
    self.transform = transform
    td, vd, ts = data_loader.load_data(n, expanded=expanded)
    if train:
        self.data = td
    else:
        self.data = vd
Example #6
Source File: anneal.py from rmnist (MIT License)
def __init__(self, n=0, train=True, transform=None, expanded=False):
    self.n = n
    self.transform = transform
    td, vd, ts = data_loader.load_data(n, expanded=expanded)
    if train:
        self.data = td
    else:
        self.data = vd
Example #7
Source File: baselines.py from rmnist (MIT License)
def baselines(n):
    td, vd, ts = data_loader.load_data(n)
    classifiers = [
        sklearn.svm.SVC(C=1000),
        sklearn.svm.SVC(kernel="linear", C=0.1),
        sklearn.neighbors.KNeighborsClassifier(1),
        sklearn.tree.DecisionTreeClassifier(),
        sklearn.ensemble.RandomForestClassifier(max_depth=10, n_estimators=500, max_features=1),
        sklearn.neural_network.MLPClassifier(alpha=1, hidden_layer_sizes=(500, 100))
    ]
    for clf in classifiers:
        clf.fit(td[0], td[1])
        # Python 3 form of the original Python 2 print statement
        print("\n{}: {}".format(type(clf).__name__, round(clf.score(vd[0], vd[1]) * 100, 2)))
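The td/vd/ts tuples returned by load_data(n) are used above as (features, labels) pairs for the training and validation splits; by the same pattern, ts presumably holds the test split. A short hedged sketch of scoring a single classifier against it:

# Hedged sketch; assumes ts is a (features, labels) pair like td and vd above.
import sklearn.svm
import data_loader

td, vd, ts = data_loader.load_data(1)
clf = sklearn.svm.SVC(C=1000)
clf.fit(td[0], td[1])
print("test accuracy: {:.2f}%".format(clf.score(ts[0], ts[1]) * 100))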
Example #8
Source File: train.py from facial-expression-recognition-svm (GNU General Public License v3.0)
def train(epochs=HYPERPARAMS.epochs, random_state=HYPERPARAMS.random_state,
          kernel=HYPERPARAMS.kernel, decision_function=HYPERPARAMS.decision_function,
          gamma=HYPERPARAMS.gamma, train_model=True):
    print("loading dataset " + DATASET.name + "...")
    if train_model:
        data, validation = load_data(validation=True)
    else:
        data, validation, test = load_data(validation=True, test=True)

    if train_model:
        # Training phase
        print("building model...")
        model = SVC(random_state=random_state, max_iter=epochs, kernel=kernel,
                    decision_function_shape=decision_function, gamma=gamma)

        print("start training...")
        print("--")
        print("kernel: {}".format(kernel))
        print("decision function: {}".format(decision_function))
        print("max epochs: {}".format(epochs))
        print("gamma: {}".format(gamma))
        print("--")
        print("Training samples: {}".format(len(data['Y'])))
        print("Validation samples: {}".format(len(validation['Y'])))
        print("--")

        start_time = time.time()
        model.fit(data['X'], data['Y'])
        training_time = time.time() - start_time
        print("training time = {0:.1f} sec".format(training_time))

        if TRAINING.save_model:
            print("saving model...")
            with open(TRAINING.save_model_path, 'wb') as f:
                cPickle.dump(model, f)

        print("evaluating...")
        validation_accuracy = evaluate(model, validation['X'], validation['Y'])
        print(" - validation accuracy = {0:.1f}".format(validation_accuracy * 100))
        return validation_accuracy
    else:
        # Testing phase: load saved model and evaluate on the test dataset
        print("start evaluation...")
        print("loading pretrained model...")
        if os.path.isfile(TRAINING.save_model_path):
            with open(TRAINING.save_model_path, 'rb') as f:
                model = cPickle.load(f)
        else:
            print("Error: file '{}' not found".format(TRAINING.save_model_path))
            exit()

        print("--")
        print("Validation samples: {}".format(len(validation['Y'])))
        print("Test samples: {}".format(len(test['Y'])))
        print("--")
        print("evaluating...")
        start_time = time.time()
        validation_accuracy = evaluate(model, validation['X'], validation['Y'])
        print(" - validation accuracy = {0:.1f}".format(validation_accuracy * 100))
        test_accuracy = evaluate(model, test['X'], test['Y'])
        print(" - test accuracy = {0:.1f}".format(test_accuracy * 100))
        print(" - evaluation time = {0:.1f} sec".format(time.time() - start_time))
        return test_accuracy
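A brief hedged usage sketch for the function above, assuming the surrounding project configuration (HYPERPARAMS, TRAINING, DATASET, and the dataset files) is in place as in the original repository:

# Hedged usage sketch; relies on the project's own configuration objects.
val_acc = train(train_model=True)    # fit the SVC, save it, report validation accuracy
test_acc = train(train_model=False)  # reload the saved model, report test accuracy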
Example #9
Source File: eval.py from Transformer-in-generating-dialogue (Apache License 2.0)
def eval():
    g = Graph(is_training=False)
    print("MSG : Graph loaded!")

    X, Sources, Targets = load_data('test')
    en2idx, idx2en = load_vocab('en.vocab.tsv')
    de2idx, idx2de = load_vocab('de.vocab.tsv')

    with g.graph.as_default():
        sv = tf.train.Supervisor()
        with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            # Load the pre-trained model
            sv.saver.restore(sess, tf.train.latest_checkpoint(pm.checkpoint))
            print("MSG : Restore Model!")

            mname = open(pm.checkpoint + '/checkpoint', 'r').read().split('"')[1]

            if not os.path.exists('Results'):
                os.mkdir('Results')
            with codecs.open("Results/" + mname, 'w', 'utf-8') as f:
                list_of_refs, predict = [], []

                # Get a batch
                for i in range(len(X) // pm.batch_size):
                    x = X[i * pm.batch_size: (i + 1) * pm.batch_size]
                    sources = Sources[i * pm.batch_size: (i + 1) * pm.batch_size]
                    targets = Targets[i * pm.batch_size: (i + 1) * pm.batch_size]

                    # Autoregressive inference
                    preds = np.zeros((pm.batch_size, pm.maxlen), dtype=np.int32)
                    for j in range(pm.maxlen):
                        _preds = sess.run(g.preds, feed_dict={g.inpt: x, g.outpt: preds})
                        preds[:, j] = _preds[:, j]

                    for source, target, pred in zip(sources, targets, preds):
                        got = " ".join(idx2de[idx] for idx in pred).split("<EOS>")[0].strip()
                        f.write("- Source: {}\n".format(source))
                        f.write("- Ground Truth: {}\n".format(target))
                        f.write("- Predict: {}\n\n".format(got))
                        f.flush()

                        # Bleu Score
                        ref = target.split()
                        prediction = got.split()
                        if len(ref) > pm.word_limit_lower and len(prediction) > pm.word_limit_lower:
                            list_of_refs.append([ref])
                            predict.append(prediction)

                score = corpus_bleu(list_of_refs, predict)
                f.write("Bleu Score = " + str(100 * score))
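The final score is computed with corpus_bleu, which here appears to be NLTK's implementation (the import is not visible in this snippet). A tiny hedged sketch of that call on toy tokenized data:

# Hedged sketch, assuming corpus_bleu is NLTK's corpus-level BLEU.
from nltk.translate.bleu_score import corpus_bleu

list_of_refs = [[["the", "cat", "sat"]], [["hello", "world"]]]    # one list of references per sample
hypotheses = [["the", "cat", "sat", "down"], ["hello", "world"]]  # one tokenized hypothesis per sample
score = corpus_bleu(list_of_refs, hypotheses)
print("Bleu Score = {}".format(100 * score))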
Example #10
Source File: eval_sick.py from neon (Apache License 2.0)
def main():
    # Parse the command line arguments
    parser = NeonArgparser(__doc__)
    parser.add_argument('--output_path', required=True,
                        help='Output path used when training model')
    parser.add_argument('--w2v_path', required=False, default=None,
                        help='Path to GoogleNews w2v file for vocab expansion.')
    parser.add_argument('--eval_data_path', required=False, default='./SICK_data',
                        help='Path to the SICK dataset for evaluating semantic relatedness')
    parser.add_argument('--max_vocab_size', required=False, default=1000000,
                        help='Limit the vocabulary expansion to fit in GPU memory')
    parser.add_argument('--subset_pct', required=False, default=100,
                        help='subset of training dataset to use (use to retrieve '
                             'preprocessed data from training)')
    args = parser.parse_args(gen_be=True)

    # Load vocab file from training
    _, vocab_file = load_data(args.data_dir, output_path=args.output_path,
                              subset_pct=float(args.subset_pct))
    vocab, _, _ = load_obj(vocab_file)

    vocab_size = len(vocab)
    neon_logger.display("\nVocab size from the dataset is: {}".format(vocab_size))

    index_from = 2  # 0: padding, 1: oov
    vocab_size_layer = vocab_size + index_from
    max_len = 30

    # Load trained model
    model_dict = load_obj(args.model_file)

    # The vocabulary expansion trick needs the correct vocab set passed to evaluate (for tokenization)
    if args.w2v_path:
        neon_logger.display("Performing Vocabulary Expansion... Loading W2V...")
        w2v_vocab, w2v_vocab_size = get_w2v_vocab(args.w2v_path,
                                                  int(args.max_vocab_size), cache=True)

        vocab_size_layer = w2v_vocab_size + index_from
        model = load_sent_encoder(model_dict, expand_vocab=True, orig_vocab=vocab,
                                  w2v_vocab=w2v_vocab, w2v_path=args.w2v_path,
                                  use_recur_last=True)
        vocab = w2v_vocab
    else:
        # Otherwise stick with the original vocab size used to train the model
        model = load_sent_encoder(model_dict, use_recur_last=True)

    model.initialize(dataset=(max_len, 1))

    evaluate(model, vocab=vocab, data_path=args.eval_data_path, evaltest=True,
             vocab_size_layer=vocab_size_layer)
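One detail worth calling out: token ids are offset by index_from = 2 so that 0 and 1 stay reserved for padding and out-of-vocabulary tokens, which is why the layer size is vocab_size + index_from. A minimal illustration with a made-up vocabulary (not from the project):

# Toy illustration of the index_from offset used above; the vocabulary is made up.
index_from = 2                                # 0: padding, 1: oov
vocab = {"the": 0, "cat": 1, "sat": 2}
shifted = {word: idx + index_from for word, idx in vocab.items()}
print(shifted)                                # {'the': 2, 'cat': 3, 'sat': 4}
vocab_size_layer = len(vocab) + index_from    # leaves room for the pad/oov ids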