Python data_loader.DataLoader() Examples
The following are 11 code examples of data_loader.DataLoader(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module data_loader, or try the search function.
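Note that data_loader is a project-local module in each repository below, not a shared library: every project defines its own DataLoader class with its own constructor signature (compare Examples #1, #4, and #8). As a purely hypothetical stand-in, here is a minimal sketch of the construct-then-iterate pattern the examples have in common; none of these names come from the projects themselves.

class DataLoader:
    """Toy loader that yields fixed-size batches from an in-memory list."""

    def __init__(self, data, batch_size):
        self.data = data
        self.batch_size = batch_size

    def __iter__(self):
        for i in range(0, len(self.data), self.batch_size):
            yield self.data[i:i + self.batch_size]

loader = DataLoader(list(range(10)), batch_size=4)
for batch in loader:
    print(batch)  # [0, 1, 2, 3], then [4, 5, 6, 7], then [8, 9]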
Example #1
Source File: train.py From torch-light with MIT License
def __init__(self, use_cuda=USECUDA, lr=LR):
    # Seed the RNG for reproducibility on whichever device is in use
    if use_cuda:
        torch.cuda.manual_seed(1234)
    else:
        torch.manual_seed(1234)

    self.kl_targ = 0.02
    self.lr_multiplier = 1.
    self.use_cuda = use_cuda

    # Training network and a separate network used for evaluation
    self.net = Net()
    self.eval_net = Net()
    if use_cuda:
        self.net = self.net.cuda()
        self.eval_net = self.eval_net.cuda()

    # Minibatch loader and a bounded buffer of self-play samples
    self.dl = DataLoader(use_cuda, MINIBATCH)
    self.sample_data = deque(maxlen=TRAINLEN)
    self.gen_optim(lr)
    self.entropy = AlphaEntropy()
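One detail worth isolating from this example: self.sample_data = deque(maxlen=TRAINLEN) gives the trainer a bounded replay buffer, so once TRAINLEN self-play samples have accumulated, each new append silently evicts the oldest. A self-contained sketch of that behavior (the buffer size here is a made-up value):

from collections import deque

buffer = deque(maxlen=3)   # stand-in for deque(maxlen=TRAINLEN)
for sample in range(5):
    buffer.append(sample)  # appends beyond maxlen evict the oldest entry
print(list(buffer))        # [2, 3, 4]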
Example #2
Source File: pixelda.py From Keras-GAN with MIT License
def __init__(self):
    # Input shape
    self.img_rows = 32
    self.img_cols = 32
    self.channels = 3
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.num_classes = 10

    # Configure MNIST and MNIST-M data loader
    self.data_loader = DataLoader(img_res=(self.img_rows, self.img_cols))

    # Loss weights
    lambda_adv = 10
    lambda_clf = 1

    # Calculate output shape of D (PatchGAN)
    patch = int(self.img_rows / 2**4)
    self.disc_patch = (patch, patch, 1)

    # Number of residual blocks in the generator
    self.residual_blocks = 6

    optimizer = Adam(0.0002, 0.5)

    # Number of filters in first layer of discriminator and classifier
    self.df = 64
    self.cf = 64

    # Build and compile the discriminators
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss='mse',
                               optimizer=optimizer,
                               metrics=['accuracy'])

    # Build the generator
    self.generator = self.build_generator()

    # Build the task (classification) network
    self.clf = self.build_classifier()

    # Input images from both domains
    img_A = Input(shape=self.img_shape)
    img_B = Input(shape=self.img_shape)

    # Translate images from domain A to domain B
    fake_B = self.generator(img_A)

    # Classify the translated image
    class_pred = self.clf(fake_B)

    # For the combined model we will only train the generator and classifier
    self.discriminator.trainable = False

    # Discriminator determines validity of translated images
    valid = self.discriminator(fake_B)

    self.combined = Model(img_A, [valid, class_pred])
    self.combined.compile(loss=['mse', 'categorical_crossentropy'],
                          loss_weights=[lambda_adv, lambda_clf],
                          optimizer=optimizer,
                          metrics=['accuracy'])
Example #3
Source File: deep_slam.py From DeepMatchVO with MIT License
def build_pose_test_graph(self, input_uint8):
    input_mc = self.select_tensor_or_placeholder_input(input_uint8)
    loader = DataLoader()
    # Split the concatenated input into the target image and stacked source images
    tgt_image, src_image_stack = loader.batch_unpack_image_sequence(
        input_mc, self.img_height, self.img_width, self.num_source)
    with tf.name_scope("pose_prediction"):
        pred_poses, _ = pose_net(tgt_image, src_image_stack, is_training=False)
        self.pred_poses = pred_poses
Example #4
Source File: main.py From cite with MIT License
def get_data_loaders(region_feature_dim, tok2idx):
    test_loader = DataLoader(args, region_feature_dim, 'test', tok2idx)
    if args.test:
        return test_loader, None, None

    # Track the longest phrase seen across all three splits
    max_length = test_loader.max_length
    train_loader = DataLoader(args, region_feature_dim, 'train', tok2idx)
    max_length = max(max_length, train_loader.max_length)
    val_loader = DataLoader(args, region_feature_dim, 'val', tok2idx,
                            set(train_loader.phrases))
    max_length = max(max_length, val_loader.max_length)

    # Pad every split to the same shared maximum length
    test_loader.set_max_length(max_length)
    train_loader.set_max_length(max_length)
    val_loader.set_max_length(max_length)
    return test_loader, train_loader, val_loader
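The point of the max_length bookkeeping above is that all three splits end up padded to a single shared length, so batches from train, val, and test line up. A stripped-down sketch of that synchronization, using a hypothetical toy loader rather than cite's real one:

class ToyLoader:
    """Hypothetical miniature of the loaders in Example #4."""

    def __init__(self, lengths):
        self.max_length = max(lengths)

    def set_max_length(self, n):
        self.max_length = n

loaders = [ToyLoader([3, 7]), ToyLoader([5, 9]), ToyLoader([4, 4])]
shared = max(loader.max_length for loader in loaders)  # 9
for loader in loaders:
    loader.set_max_length(shared)  # every split now pads to the same length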
Example #5
Source File: main.py From GraftNet with BSD 2-Clause "Simplified" License
def test(cfg):
    # Load the vocabulary/ID mappings
    entity2id = load_dict(cfg['data_folder'] + cfg['entity2id'])
    word2id = load_dict(cfg['data_folder'] + cfg['word2id'])
    relation2id = load_dict(cfg['data_folder'] + cfg['relation2id'])

    # Index the test documents and build the test-set loader
    test_documents = load_documents(cfg['data_folder'] + cfg['test_documents'])
    test_document_entity_indices, test_document_texts = index_document_entities(
        test_documents, word2id, entity2id, cfg['max_document_word'])
    test_data = DataLoader(cfg['data_folder'] + cfg['test_data'], test_documents,
                           test_document_entity_indices, test_document_texts,
                           word2id, relation2id, entity2id,
                           cfg['max_query_word'], cfg['max_document_word'],
                           cfg['use_kb'], cfg['use_doc'],
                           cfg['use_inverse_relation'])

    my_model = get_model(cfg, test_data.num_kb_relation, len(entity2id), len(word2id))
    test_acc = inference(my_model, test_data, entity2id, cfg, log_info=True)
    return test_acc
Example #6
Source File: batch_train.py From FormulaNet with BSD 3-Clause "New" or "Revised" License
def main():
    args = parse_args()
    mp.set_start_method('spawn')  # Using spawn is decided.
    _logger = log.get_logger(__name__, args)
    _logger.info(print_args(args))

    loaders = []
    file_list = os.listdir(args.train_file)
    random.shuffle(file_list)
    for i in range(args.worker):
        loader = data_loader.DataLoader(
            args.train_file,
            args.dict_file,
            separate_conj_stmt=args.direction,
            binary=args.binary,
            part_no=i,
            part_total=args.worker,
            file_list=file_list,
            norename=args.norename,
            filter_abelian=args.fabelian,
            compatible=args.compatible)
        loaders.append(loader)
        loader.start_reader()

    net, mid_net, loss_fn = create_models(args, loaders[0], allow_resume=True)

    # Use fake modules to replace the real ones
    net = FakeModule(net)
    if mid_net is not None:
        mid_net = FakeModule(mid_net)
    for i in range(len(loss_fn)):
        loss_fn[i] = FakeModule(loss_fn[i])

    opt = get_opt(net, mid_net, loss_fn, args)

    inqueues = []
    outqueues = []
    plist = []
    for i in range(args.worker):
        recv_p, send_p = Pipe(False)
        recv_p2, send_p2 = Pipe(False)
        inqueues.append(send_p)
        outqueues.append(recv_p2)
        plist.append(Process(target=worker, args=(recv_p, send_p2, loaders[i], args, i)))
        plist[-1].start()

    _logger.warning('Training begins')
    train(inqueues, outqueues, net, mid_net, loss_fn, opt, loaders, args, _logger)
    loader.destruct()
    for p in plist:
        p.terminate()
    for loader in loaders:
        loader.destruct()
    _logger.warning('Training ends')
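The fan-out above pairs each worker process with two one-way pipes: the parent sends work on one and reads results back on the other. A minimal, self-contained sketch of that wiring (the worker body is a hypothetical stand-in, not FormulaNet's):

from multiprocessing import Pipe, Process

def worker(recv_p, send_p):
    task = recv_p.recv()   # block until the parent sends work
    send_p.send(task * 2)  # stand-in for real training work

if __name__ == '__main__':
    recv_a, send_a = Pipe(False)  # parent -> worker (one-way)
    recv_b, send_b = Pipe(False)  # worker -> parent (one-way)
    p = Process(target=worker, args=(recv_a, send_b))
    p.start()
    send_a.send(21)
    print(recv_b.recv())          # prints 42
    p.join()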
Example #7
Source File: pix2pix.py From Keras-GAN with MIT License
def __init__(self):
    # Input shape
    self.img_rows = 256
    self.img_cols = 256
    self.channels = 3
    self.img_shape = (self.img_rows, self.img_cols, self.channels)

    # Configure data loader
    self.dataset_name = 'facades'
    self.data_loader = DataLoader(dataset_name=self.dataset_name,
                                  img_res=(self.img_rows, self.img_cols))

    # Calculate output shape of D (PatchGAN)
    patch = int(self.img_rows / 2**4)
    self.disc_patch = (patch, patch, 1)

    # Number of filters in the first layer of G and D
    self.gf = 64
    self.df = 64

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss='mse',
                               optimizer=optimizer,
                               metrics=['accuracy'])

    #-------------------------
    # Construct Computational
    # Graph of Generator
    #-------------------------

    # Build the generator
    self.generator = self.build_generator()

    # Input images and their conditioning images
    img_A = Input(shape=self.img_shape)
    img_B = Input(shape=self.img_shape)

    # By conditioning on B generate a fake version of A
    fake_A = self.generator(img_B)

    # For the combined model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator determines validity of translated image / condition pairs
    valid = self.discriminator([fake_A, img_B])

    self.combined = Model(inputs=[img_A, img_B], outputs=[valid, fake_A])
    self.combined.compile(loss=['mse', 'mae'],
                          loss_weights=[1, 100],
                          optimizer=optimizer)
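The disc_patch arithmetic shared by Examples #2 and #7 is worth a note: the PatchGAN discriminator halves each spatial dimension four times, so its output grid is the input resolution divided by 2**4, and each grid cell scores one patch of the image. Isolated, with Example #7's numbers:

img_rows = 256                  # pix2pix input height
patch = img_rows // 2**4        # 16: four stride-2 layers halve the size 4 times
disc_patch = (patch, patch, 1)  # D emits a 16x16 grid of real/fake scores
print(disc_patch)               # (16, 16, 1)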
Example #8
Source File: main.py From MobileNet with Apache License 2.0
def main():
    # Parse the JSON arguments
    try:
        config_args = parse_args()
    except:
        print("Add a config file using \'--config file_name.json\'")
        exit(1)

    # Create the experiment directories
    _, config_args.summary_dir, config_args.checkpoint_dir = create_experiment_dirs(
        config_args.experiment_dir)

    # Reset the default Tensorflow graph
    tf.reset_default_graph()

    # Tensorflow specific configuration
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # Data loading
    data = DataLoader(config_args.batch_size, config_args.shuffle)
    print("Loading Data...")
    config_args.img_height, config_args.img_width, config_args.num_channels, \
    config_args.train_data_size, config_args.test_data_size = data.load_data()
    print("Data loaded\n\n")

    # Model creation
    print("Building the model...")
    model = MobileNet(config_args)
    print("Model is built successfully\n\n")

    # Summarizer creation
    summarizer = Summarizer(sess, config_args.summary_dir)

    # Train class
    trainer = Train(sess, model, data, summarizer)

    if config_args.to_train:
        try:
            print("Training...")
            trainer.train()
            print("Training Finished\n\n")
        except KeyboardInterrupt:
            trainer.save_model()

    if config_args.to_test:
        print("Final test!")
        trainer.test('val')
        print("Testing Finished\n\n")
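The multi-line assignment above pours five dataset properties returned by data.load_data() straight into the config object. A tiny self-contained illustration of that unpacking pattern (all names and values here are stand-ins, not MobileNet's):

class Config:
    pass

def load_data():
    # stand-in returning (height, width, channels, train_size, test_size)
    return 224, 224, 3, 50000, 10000

cfg = Config()
cfg.img_height, cfg.img_width, cfg.num_channels, \
cfg.train_data_size, cfg.test_data_size = load_data()
print(cfg.train_data_size)  # 50000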
Example #9
Source File: test_kitti_pose.py From DeepMatchVO with MIT License
def main():
    # get input images
    if not os.path.isdir(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    concat_img_dir = os.path.join(FLAGS.concat_img_dir, '%.2d' % FLAGS.test_seq)
    max_src_offset = int((FLAGS.seq_length - 1)/2)
    N = len(glob(concat_img_dir + '/*.jpg')) + 2*max_src_offset
    test_frames = ['%.2d %.6d' % (FLAGS.test_seq, n) for n in range(N)]
    with open(FLAGS.dataset_dir + 'sequences/%.2d/times.txt' % FLAGS.test_seq, 'r') as f:
        times = f.readlines()
    times = np.array([float(s[:-1]) for s in times])

    with tf.Session() as sess:
        # setup input tensor
        loader = DataLoader(FLAGS.concat_img_dir, FLAGS.batch_size,
                            FLAGS.img_height, FLAGS.img_width, FLAGS.seq_length-1)
        image_sequence_names, tgt_inds = load_kitti_image_sequence_names(
            FLAGS.concat_img_dir, test_frames, FLAGS.seq_length)
        image_sequence_names = complete_batch_size(image_sequence_names, FLAGS.batch_size)
        tgt_inds = complete_batch_size(tgt_inds, FLAGS.batch_size)
        assert len(tgt_inds) == len(image_sequence_names)
        batch_sample = loader.load_test_batch(image_sequence_names)
        sess.run(batch_sample.initializer)
        input_batch = batch_sample.get_next()
        input_batch.set_shape([FLAGS.batch_size, FLAGS.img_height,
                               FLAGS.img_width * FLAGS.seq_length, 3])

        # init system
        system = DeepSlam()
        system.setup_inference(FLAGS.img_height, FLAGS.img_width, 'pose',
                               FLAGS.seq_length, FLAGS.batch_size, input_batch)
        saver = tf.train.Saver([var for var in tf.trainable_variables()])
        saver.restore(sess, FLAGS.ckpt_file)

        round_num = len(image_sequence_names) // FLAGS.batch_size
        for i in range(round_num):
            pred = system.inference(sess, mode='pose')
            for j in range(FLAGS.batch_size):
                tgt_idx = tgt_inds[i * FLAGS.batch_size + j]
                pred_poses = pred['pose'][j]
                # Insert the target pose [0, 0, 0, 0, 0, 0] to the middle
                pred_poses = np.insert(pred_poses, max_src_offset, np.zeros((1,6)), axis=0)
                curr_times = times[tgt_idx-max_src_offset : tgt_idx+max_src_offset+1]
                out_file = FLAGS.output_dir + '%.6d.txt' % (tgt_idx - max_src_offset)
                dump_pose_seq_TUM(out_file, pred_poses, curr_times)
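One step above is easy to miss: the network predicts poses only for the source frames, so a zero (identity) pose for the target frame is inserted at the middle of the sequence before dumping. That step, isolated with dummy values:

import numpy as np

pred_poses = np.ones((4, 6))  # 4 source-frame poses (dummy values)
max_src_offset = 2            # the target frame sits between the sources
full_seq = np.insert(pred_poses, max_src_offset, np.zeros((1, 6)), axis=0)
print(full_seq.shape)         # (5, 6): sources plus the inserted target pose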
Example #10
Source File: main.py From face-antispoofing-using-mobileNet with Apache License 2.0
def main():
    # Parse the JSON arguments
    try:
        config_args = parse_args()
    except:
        print("Add a config file using \'--config file_name.json\'")
        exit(1)

    # Create the experiment directories
    _, config_args.summary_dir, config_args.checkpoint_dir = create_experiment_dirs(
        config_args.experiment_dir)

    # Reset the default Tensorflow graph
    tf.reset_default_graph()

    # Tensorflow specific configuration
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # Data loading
    data = DataLoader(config_args.batch_size, config_args.shuffle)
    print("Loading Data...")
    config_args.img_height, config_args.img_width, config_args.num_channels, \
    config_args.train_data_size, config_args.test_data_size = data.load_data()
    print("Data loaded\n\n")

    # Model creation
    print("Building the model...")
    model = MobileNet(config_args)
    print("Model is built successfully\n\n")

    # Summarizer creation
    summarizer = Summarizer(sess, config_args.summary_dir)

    # Train class
    trainer = Train(sess, model, data, summarizer)

    if config_args.to_train:
        try:
            print("Training...")
            trainer.train()
            print("Training Finished\n\n")
        except KeyboardInterrupt:
            trainer.save_model()

    if config_args.to_test:
        print("Final test!")
        trainer.test('val')
        print("Testing Finished\n\n")
Example #11
Source File: main.py From ShuffleNet with Apache License 2.0
def main():
    # Parse the JSON arguments
    config_args = parse_args()

    # Create the experiment directories
    _, config_args.summary_dir, config_args.checkpoint_dir = create_experiment_dirs(
        config_args.experiment_dir)

    # Reset the default Tensorflow graph
    tf.reset_default_graph()

    # Tensorflow specific configuration
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # Data loading
    # The batch size is equal to 1 when testing to simulate the real experiment.
    data_batch_size = config_args.batch_size if config_args.train_or_test == "train" else 1
    data = DataLoader(data_batch_size, config_args.shuffle)
    print("Loading Data...")
    config_args.img_height, config_args.img_width, config_args.num_channels, \
    config_args.train_data_size, config_args.test_data_size = data.load_data()
    print("Data loaded\n\n")

    # Model creation
    print("Building the model...")
    model = ShuffleNet(config_args)
    print("Model is built successfully\n\n")

    # Parameters visualization
    show_parameters()

    # Summarizer creation
    summarizer = Summarizer(sess, config_args.summary_dir)

    # Train class
    trainer = Train(sess, model, data, summarizer)

    if config_args.train_or_test == 'train':
        try:
            # print("FLOPs for batch size = " + str(config_args.batch_size) + "\n")
            # calculate_flops()
            print("Training...")
            trainer.train()
            print("Training Finished\n\n")
        except KeyboardInterrupt:
            trainer.save_model()
    elif config_args.train_or_test == 'test':
        # print("FLOPs for single inference \n")
        # calculate_flops()
        # This can be 'val' or 'test' or even 'train' according to the needs.
        print("Testing...")
        trainer.test('val')
        print("Testing Finished\n\n")
    else:
        raise ValueError("Train or Test options only are allowed")
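The one twist here relative to Examples #8 and #10 is the batch-size switch: training uses the configured batch size, while testing forces a batch size of 1 to mimic one-image-at-a-time inference. The same conditional, isolated as a function:

def pick_batch_size(train_or_test, configured_batch_size):
    # Batch size 1 at test time simulates the real (single-image) experiment.
    return configured_batch_size if train_or_test == "train" else 1

assert pick_batch_size("train", 64) == 64
assert pick_batch_size("test", 64) == 1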