Python utils.one_hot() Examples
The following are 10 code examples of utils.one_hot(), collected from open-source projects. You can go to the original project or source file via the attribution line above each example. Note that in every case utils is a module defined inside the project itself, not a shared PyPI package, so the exact signature of one_hot() varies slightly from project to project.
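As a reference point for reading the examples, here is a minimal NumPy sketch of such a helper. The function name and the m (number of classes) parameter mirror the calls below, but this is an assumption about the shape of the API, not any one project's actual implementation:

import numpy as np

def one_hot(labels, m=None):
    """Return a one-hot encoding of an array of integer class labels.

    labels -- array-like of ints in the range [0, m)
    m      -- number of classes; inferred from the labels if omitted
    """
    labels = np.asarray(labels)
    if m is None:
        m = int(labels.max()) + 1
    # Row i of the identity matrix is the one-hot vector for class i, so
    # fancy-indexing with the labels builds the whole encoding at once.
    return np.eye(m, dtype=np.float32)[labels]

With this sketch, one_hot([0, 2, 1], m=3) returns a (3, 3) float32 array with a single 1 per row, and a (batch, time) label array comes back with shape (batch, time, m), which matches how the examples below use the result.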
Example #1
Source File: load.py From kaggle-ndsb with MIT License
def load_train(self):
    labels = utils.one_hot(data.labels_train, m=121).astype(np.float32)

    split = np.load(DEFAULT_VALIDATION_SPLIT_PATH)
    indices_train = split['indices_train']
    indices_valid = split['indices_valid']

    image_shapes = np.asarray([img.shape for img in data.load('train')]).astype(np.float32)
    moments = np.load("data/image_moment_stats_v1_train.pkl")
    centroid_distance = np.abs(moments["centroids"][:, [1, 0]] - image_shapes / 2)
    info = np.concatenate((centroid_distance, image_shapes, moments["angles"][:, None],
                           moments["minor_axes"][:, None], moments["major_axes"][:, None]), 1).astype(np.float32)

    self.info_train = info[indices_train]
    self.info_valid = info[indices_valid]

    self.y_train = np.load(self.train_pred_file).astype(np.float32)
    self.y_valid = np.load(self.valid_pred_file).astype(np.float32)

    self.labels_train = labels[indices_train]
    self.labels_valid = labels[indices_valid]
Example #2
Source File: load.py From kaggle-ndsb with MIT License
def load_train(self):
    labels = utils.one_hot(data.labels_train, m=121).astype(np.float32)

    split = np.load(DEFAULT_VALIDATION_SPLIT_PATH)
    indices_train = split['indices_train']
    indices_valid = split['indices_valid']

    image_shapes = np.asarray([img.shape for img in data.load('train')]).astype(np.float32)
    self.image_shapes_train = image_shapes[indices_train]
    self.image_shapes_valid = image_shapes[indices_valid]

    self.y_train = np.load(self.train_pred_file).astype(np.float32)
    self.y_valid = np.load(self.valid_pred_file).astype(np.float32)

    self.labels_train = labels[indices_train]
    self.labels_valid = labels[indices_valid]
Example #3
Source File: dqn_agent.py From dist-dqn with MIT License
def _get_minibatch_feed_dict(self, target_q_values,
                             non_terminal_minibatch, terminal_minibatch):
    """
    Helper to construct the feed_dict for train_op. Takes the non-terminal
    and terminal minibatches as well as the max q-values computed from the
    target network for non-terminal states. Computes the expected q-values
    based on discounted future reward.

    @return: feed_dict to be used for train_op
    """
    assert len(target_q_values) == len(non_terminal_minibatch)

    states = []
    expected_q = []
    actions = []

    # Compute expected q-values to plug into the loss function
    minibatch = itertools.chain(non_terminal_minibatch, terminal_minibatch)
    for item, target_q in zip_longest(minibatch, target_q_values, fillvalue=0):
        state, action, reward, _, _ = item
        states.append(state)
        # target_q will be 0 for terminal states due to fillvalue in zip_longest
        expected_q.append(reward + self.config.reward_discount * target_q)
        actions.append(utils.one_hot(action, self.env.action_space.n))

    return {
        self.network.x_placeholder: states,
        self.network.q_placeholder: expected_q,
        self.network.action_placeholder: actions,
    }
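Here one_hot() encodes a single integer action against the size of the environment's action space, and the resulting vectors are fed to action_placeholder. In a DQN this kind of encoding is typically used to mask out the Q-value of the action actually taken when computing the loss. A minimal sketch of both pieces (the helper and the masking step are illustrative, not the project's exact code):

import numpy as np

def one_hot(index, size):
    """One-hot vector for a single integer index."""
    v = np.zeros(size, dtype=np.float32)
    v[index] = 1.0
    return v

q_values = np.array([0.1, 0.7, 0.2], dtype=np.float32)  # network output for one state
action_mask = one_hot(1, 3)                             # action 1 was taken
q_taken = float(np.sum(q_values * action_mask))         # 0.7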
Example #4
Source File: load.py From kaggle-ndsb with MIT License
def load_train(self):
    labels = utils.one_hot(data.labels_train, m=121).astype(np.float32)

    split = np.load(DEFAULT_VALIDATION_SPLIT_PATH)
    indices_train = split['indices_train']
    indices_valid = split['indices_valid']

    features = np.load("data/features_train.pkl").item()

    if "aaronmoments" in self.features:
        print "aaronmoments"
        def normalize(x):
            return x
            # return (x - x.mean(axis=0, keepdims=True)) / x.std(axis=0, keepdims=True)
        image_shapes = np.asarray([img.shape for img in data.load('train')]).astype(np.float32)
        moments = np.load("data/image_moment_stats_v1_train.pkl")
        centroid_distance = np.abs(moments["centroids"][:, [1, 0]] - image_shapes / 2)
        angles = moments["angles"][:, None]
        minor_axes = moments["minor_axes"][:, None]
        major_axes = moments["major_axes"][:, None]
        centroid_distance = normalize(centroid_distance)
        angles = normalize(angles)
        minor_axes = normalize(minor_axes)
        major_axes = normalize(major_axes)
        features["aaronmoments"] = np.concatenate(
            [centroid_distance, angles, minor_axes, major_axes], 1).astype(np.float32)

    info = np.concatenate([features[feat] for feat in self.features], 1).astype(np.float32)
    print info.shape

    self.info_train = info[indices_train]
    self.info_valid = info[indices_valid]

    self.y_train = np.load(self.train_pred_file).astype(np.float32)
    self.y_valid = np.load(self.valid_pred_file).astype(np.float32)

    self.labels_train = labels[indices_train]
    self.labels_valid = labels[indices_valid]
Example #5
Source File: load.py From kaggle-ndsb with MIT License
def load_train(self):
    images = data.load('train')
    labels = utils.one_hot(data.labels_train, m=121).astype(np.float32)

    split = np.load(self.validation_split_path)
    indices_train = split['indices_train']
    indices_valid = split['indices_valid']

    self.images_train = images[indices_train]
    self.labels_train = labels[indices_train]
    self.images_valid = images[indices_valid]
    self.labels_valid = labels[indices_valid]
Example #6
Source File: run_dknn.py From neuralsort with MIT License
def train(epoch):
    h_phi.train()
    to_average = []
    # train
    for query, candidates in zip(batched_query_train, batched_neighbor_train):
        optimizer.zero_grad()
        cand_x, cand_y = candidates
        query_x, query_y = query

        cand_x = cand_x.to(device=gpu)
        cand_y = cand_y.to(device=gpu)
        query_x = query_x.to(device=gpu)
        query_y = query_y.to(device=gpu)

        neighbor_e = h_phi(cand_x).reshape(NUM_TRAIN_NEIGHBORS, EMBEDDING_SIZE)
        query_e = h_phi(query_x).reshape(NUM_TRAIN_QUERIES, EMBEDDING_SIZE)

        neighbor_y_oh = one_hot(cand_y).reshape(NUM_TRAIN_NEIGHBORS, 10)
        query_y_oh = one_hot(query_y).reshape(NUM_TRAIN_QUERIES, 10)

        losses = dknn_loss(query_e, neighbor_e, query_y_oh, neighbor_y_oh)
        loss = losses.mean()
        loss.backward()
        optimizer.step()
        to_average.append((-loss).item() / k)

    print('Avg. train correctness of top k:', sum(to_average) / len(to_average))
    print('Avg. train correctness of top k:',
          sum(to_average) / len(to_average), file=logfile)
    logfile.flush()
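Unlike the NumPy examples above, this project operates on PyTorch tensors: one_hot() is applied to label tensors already moved to the GPU, and the encodings feed a differentiable k-nearest-neighbor loss. A tensor-based helper consistent with these calls might look like the following sketch (the project's actual implementation may differ):

import torch

def one_hot(labels, num_classes=10):
    """One-hot encode a 1-D int64 tensor of labels.

    Returns a float tensor of shape (len(labels), num_classes) on the
    same device as the input.
    """
    out = torch.zeros(labels.size(0), num_classes, device=labels.device)
    # scatter_ writes 1.0 at column labels[i] of row i.
    return out.scatter_(1, labels.unsqueeze(1), 1.0)

labels = torch.tensor([3, 0, 2])
print(one_hot(labels))  # rows are the basis vectors e_3, e_0, e_2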
Example #7
Source File: timitphonemerec_test.py From mist-rnns with Apache License 2.0
def main():
    """ Test an RNN trained for TIMIT phoneme recognition. """

    args, params_str, layer_kwargs = parse_args()

    _, _, test_inputs, test_labels = timitphonemerec.load_split(args.data_dir, val=False,
                                                                mfcc=True, normalize=True)

    # Input seqs have shape [length, INPUT_SIZE]. Label seqs are int8 arrays with shape [length],
    # but need to have shape [length, 1] for the batch generator.
    test_labels = [seq[:, np.newaxis] for seq in test_labels]

    test_batches = utils.full_bptt_batch_generator(test_inputs, test_labels, TEST_BATCH_SIZE,
                                                   num_epochs=1, shuffle=False)

    model = models.RNNClassificationModel(args.layer_type, INPUT_SIZE, TARGET_SIZE,
                                          args.num_hidden_units, args.activation_type,
                                          **layer_kwargs)

    def _error_rate(valid_predictions, valid_targets):
        incorrect_mask = tf.logical_not(tf.equal(tf.argmax(valid_predictions, 1),
                                                 tf.argmax(valid_targets, 1)))
        return tf.reduce_mean(tf.to_float(incorrect_mask))
    model.error_rate = _error_rate(model.valid_predictions, model.valid_targets)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = False
    sess = tf.Session(config=config)
    saver = tf.train.Saver()
    saver.restore(sess, os.path.join(args.results_dir, 'model.ckpt'))

    batch_inputs, batch_labels = next(test_batches)
    batch_targets = utils.one_hot(np.squeeze(batch_labels, 2), TARGET_SIZE)

    valid_predictions, valid_targets, error_rate = sess.run(
        [model.valid_predictions, model.valid_targets, model.error_rate],
        feed_dict={model.inputs: batch_inputs, model.targets: batch_targets}
    )

    print('%f' % error_rate)
    with open(os.path.join(args.results_dir, 'test_result.txt'), 'w') as f:
        print('%f' % error_rate, file=f)
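Note that the labels here are per time step, so utils.one_hot() is applied to a 2-D [batch, time] array (after squeezing away the trailing axis) and produces [batch, time, TARGET_SIZE] targets. A self-contained NumPy illustration of that shape transformation, with an illustrative TARGET_SIZE:

import numpy as np

TARGET_SIZE = 61  # illustrative value; e.g. the 61 TIMIT phone labels

# Dummy [batch, time, 1] integer label sequences, as the batch generator yields them.
batch_labels = np.random.randint(0, TARGET_SIZE, size=(4, 20, 1))

# Squeeze the trailing axis, then expand each label into a one-hot vector.
batch_targets = np.eye(TARGET_SIZE, dtype=np.float32)[np.squeeze(batch_labels, 2)]
print(batch_targets.shape)  # (4, 20, 61)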
Example #8
Source File: mnist_test.py From mist-rnns with Apache License 2.0
def main():
    """ Test an RNN for sequential (possibly permuted) MNIST recognition. """

    args, params_str, layer_kwargs = parse_args()

    outs = mnist.load_split(args.data_dir, val=False, permute=args.permute,
                            normalize=True, seed=0)
    _, _, test_images, test_labels = outs

    # Flatten the images.
    test_inputs = test_images.reshape([len(test_images), -1, INPUT_SIZE])

    # Align sequence-level labels with the appropriate time steps by padding with NaNs,
    # and to do so, first convert the labels to floats.
    length = test_inputs.shape[1]
    pad = lambda x: np.pad(x, [[0, 0], [length - 1, 0], [0, 0]], mode='constant',
                           constant_values=np.nan)
    test_labels = pad(test_labels.reshape([-1, 1, 1]).astype(np.float))

    test_batches = utils.full_bptt_batch_generator(test_inputs, test_labels, TEST_BATCH_SIZE,
                                                   num_epochs=1, shuffle=False)

    model = models.RNNClassificationModel(args.layer_type, INPUT_SIZE, TARGET_SIZE,
                                          args.num_hidden_units, args.activation_type,
                                          **layer_kwargs)

    def _error_rate(valid_predictions, valid_targets):
        incorrect_mask = tf.logical_not(tf.equal(tf.argmax(valid_predictions, 1),
                                                 tf.argmax(valid_targets, 1)))
        return tf.reduce_mean(tf.to_float(incorrect_mask))
    model.error_rate = _error_rate(model.valid_predictions, model.valid_targets)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = False
    sess = tf.Session(config=config)
    saver = tf.train.Saver()
    saver.restore(sess, os.path.join(args.results_dir, 'model.ckpt'))

    error_rates = []
    for batch_inputs, batch_labels in test_batches:
        batch_targets = utils.one_hot(np.squeeze(batch_labels, 2), TARGET_SIZE)
        valid_predictions, valid_targets, batch_error_rates = sess.run(
            [model.valid_predictions, model.valid_targets, model.error_rate],
            feed_dict={model.inputs: batch_inputs, model.targets: batch_targets}
        )
        error_rates.append(batch_error_rates)
    error_rate = np.mean(error_rates, dtype=np.float)

    print('%f' % error_rate)
    with open(os.path.join(args.results_dir, 'test_result.txt'), 'w') as f:
        print('%f' % error_rate, file=f)
Example #9
Source File: trainOps.py From adversarial-feature-augmentation with MIT License
def train_feature_generator(self):
    print 'Training sampler.'

    images, labels = self.load_svhn(self.svhn_dir, split='train')
    labels = utils.one_hot(labels, 10)

    # build a graph
    model = self.model
    model.build_model()

    noise_dim = 100
    epochs = 5000

    with tf.Session(config=self.config) as sess:
        # initialize variables
        tf.global_variables_initializer().run()

        # restore feature extractor trained on Step 0
        print ('Loading pretrained feature extractor.')
        variables_to_restore = slim.get_model_variables(scope='feature_extractor')
        restorer = tf.train.Saver(variables_to_restore)
        restorer.restore(sess, self.pretrained_feature_extractor)
        print 'Loaded'

        summary_writer = tf.summary.FileWriter(logdir=self.log_dir, graph=tf.get_default_graph())
        saver = tf.train.Saver()

        for step in range(self.train_feature_generator_iters):

            i = step % int(images.shape[0] / self.batch_size)

            images_batch = images[i*self.batch_size:(i+1)*self.batch_size]
            labels_batch = labels[i*self.batch_size:(i+1)*self.batch_size]
            noise = utils.sample_Z(self.batch_size, noise_dim, 'uniform')

            feed_dict = {model.noise: noise, model.images: images_batch, model.labels: labels_batch}

            sess.run(model.d_train_op, feed_dict)
            sess.run(model.g_train_op, feed_dict)

            if (step+1) % 100 == 0:
                avg_D_fake = sess.run(model.logits_fake, feed_dict)
                avg_D_real = sess.run(model.logits_real, feed_dict)
                summary, dl, gl = sess.run([model.summary_op, model.d_loss, model.g_loss], feed_dict)
                summary_writer.add_summary(summary, step)
                print ('Step: [%d/%d] d_loss: %.6f g_loss: %.6f avg_d_fake: %.2f avg_d_real: %.2f '
                       % (step+1, self.train_feature_generator_iters, dl, gl,
                          avg_D_fake.mean(), avg_D_real.mean()))

        print 'Saving.'
        saver.save(sess, self.pretrained_feature_generator)
Example #10
Source File: load.py From kaggle-ndsb with MIT License
def load_train(self):
    train_images = data.load('train')
    train_labels = utils.one_hot(data.labels_train).astype(np.float32)

    if "valid_pred_file" in self.__dict__:
        valid_pseudo_labels = np.load(self.valid_pred_file).astype(np.float32)
    else:
        print "No valid_pred_file set. Only using test-set for pseudolabeling!!"

    shuffle = np.load("test_shuffle_seed0.npy")
    if not ("shard" in self.__dict__):
        raise ValueError("Missing argument: shard: (should be value in {0, 1, 2})")
    if not self.shard in [0, 1, 2]:
        raise ValueError("Wrong argument: shard: (should be value in {0, 1, 2})")
    N = len(shuffle)
    if self.shard == 0:
        train_shard = shuffle[N/3:]
    if self.shard == 1:
        train_shard = np.concatenate((shuffle[:N/3], shuffle[2*N/3:]))
    if self.shard == 2:
        train_shard = shuffle[:2*N/3]

    test_images = data.load('test')[train_shard]
    test_pseudo_labels = np.load(self.test_pred_file)[train_shard].astype(np.float32)
    print test_pseudo_labels.shape

    if not hasattr(self, 'validation_split_path'):
        self.validation_split_path = DEFAULT_VALIDATION_SPLIT_PATH
    split = np.load(self.validation_split_path)
    indices_train = split['indices_train']
    indices_valid = split['indices_valid']

    self.images_train = train_images[indices_train]
    self.labels_train = train_labels[indices_train]

    if "valid_pred_file" in self.__dict__:
        self.images_pseudo = np.concatenate((train_images[indices_valid], test_images), 0)
        self.labels_pseudo = np.concatenate((valid_pseudo_labels, test_pseudo_labels), 0)
    else:
        self.images_pseudo = test_images
        self.labels_pseudo = test_pseudo_labels

    self.images_valid = train_images[indices_valid]
    self.labels_valid = train_labels[indices_valid]