Python utils.batch_iter() Examples
The following are 9 code examples of utils.batch_iter(), drawn from open-source projects. You can go to the original project or source file by following the links above each example.
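batch_iter() is not a standard-library function: each project above ships its own implementation in its utils module. As a minimal sketch of the behavior the examples below assume (a hypothetical reconstruction, not the actual code of any of these projects), it splits its input into fixed-size batches, optionally shuffling first:

import numpy as np

def batch_iter(data, batch_size, shuffle=False):
    # Yield successive batches of `data`; the last batch may be smaller
    # than batch_size when len(data) is not a multiple of it.
    data = np.asarray(data)
    indices = np.arange(len(data))
    if shuffle:
        np.random.shuffle(indices)
    for start in range(0, len(data), batch_size):
        yield data[indices[start:start + batch_size]]

Details differ per project: for instance, the Dense_BiLSTM version (Examples #6 and #7) yields (words, labels) pairs rather than raw array slices, and the personalized-dialog version (Example #4) accepts shuffle as a third positional argument.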
Example #1
Source File: test.py From personalized-dialog with MIT License | 6 votes
def evaluate_one_row(candidates_tensor, true_context, sess, model, test_score, true_response):
    for batch in batch_iter(candidates_tensor, 512):
        candidate_responses = batch[:, 0, :]
        context_batch = np.repeat(true_context, candidate_responses.shape[0], axis=0)
        scores = sess.run(
            model.f_pos,
            feed_dict={model.context_batch: context_batch,
                       model.response_batch: candidate_responses,
                       model.neg_response_batch: candidate_responses}
        )
        for ind, score in enumerate(scores):
            # The original `score == float('NaN')` check is always False,
            # since NaN never compares equal to anything; use np.isnan
            # (and np.isinf for the infinity cases) instead.
            if np.isinf(score) or np.isnan(score):
                print(score, ind, scores[ind])
                raise ValueError
            if score >= test_score and not np.array_equal(candidate_responses[ind], true_response):
                return False
    return True
Example #2
Source File: utils_test.py From learning-from-human-preferences with MIT License | 6 votes
def test_batch_iter_1(self):
    """
    Check that batch_iter gives us exactly the right data back.
    """
    l1 = list(range(16))
    l2 = list(range(15))
    l3 = list(range(13))
    for l in [l1, l2, l3]:
        for shuffle in [True, False]:
            expected_data = l
            actual_data = set()
            expected_n_batches = ceil(len(l) / 4)
            actual_n_batches = 0
            for batch_n, x in enumerate(batch_iter(l, batch_size=4, shuffle=shuffle)):
                if batch_n == expected_n_batches - 1 and len(l) % 4 != 0:
                    self.assertEqual(len(x), len(l) % 4)
                else:
                    self.assertEqual(len(x), 4)
                self.assertEqual(len(actual_data.intersection(set(x))), 0)
                actual_data = actual_data.union(set(x))
                actual_n_batches += 1
            self.assertEqual(actual_n_batches, expected_n_batches)
            np.testing.assert_array_equal(list(actual_data), expected_data)
Example #3
Source File: reward_predictor.py From learning-from-human-preferences with MIT License | 6 votes
def train(self, prefs_train, prefs_val, val_interval):
    """
    Train all ensemble members for one epoch.
    """
    print("Training/testing with %d/%d preferences" % (len(prefs_train), len(prefs_val)))
    start_steps = self.n_steps
    start_time = time.time()
    for _, batch in enumerate(batch_iter(prefs_train.prefs, batch_size=32, shuffle=True)):
        self.train_step(batch, prefs_train)
        self.n_steps += 1
        if self.n_steps and self.n_steps % val_interval == 0:
            self.val_step(prefs_val)
    end_time = time.time()
    end_steps = self.n_steps
    rate = (end_steps - start_steps) / (end_time - start_time)
    easy_tf_log.tflog('reward_predictor_training_steps_per_second', rate)
Example #4
Source File: train.py From personalized-dialog with MIT License | 5 votes
def _train(train_tensor, batch_size, neg_size, model, optimizer, sess):
    avg_loss = 0
    for batch in batch_iter(train_tensor, batch_size, True):
        for neg_batch in neg_sampling_iter(train_tensor, batch_size, neg_size):
            loss = sess.run(
                [model.loss, optimizer],
                feed_dict={model.context_batch: batch[:, 0, :],
                           model.response_batch: batch[:, 1, :],
                           model.neg_response_batch: neg_batch[:, 1, :]}
            )
            avg_loss += loss[0]
    avg_loss = avg_loss / (train_tensor.shape[0] * neg_size)
    return avg_loss
Example #5
Source File: train.py From personalized-dialog with MIT License | 5 votes
def _forward_all(dev_tensor, model, sess):
    avg_dev_loss = 0
    for batch in batch_iter(dev_tensor, 256):
        for neg_batch in neg_sampling_iter(dev_tensor, 256, 1, 42):
            loss = sess.run(
                [model.loss],
                feed_dict={model.context_batch: batch[:, 0, :],
                           model.response_batch: batch[:, 1, :],
                           model.neg_response_batch: neg_batch[:, 1, :]}
            )
            avg_dev_loss += loss[0]
    avg_dev_loss = avg_dev_loss / (dev_tensor.shape[0] * 1)
    return avg_dev_loss
Example #6
Source File: model.py From Dense_BiLSTM with MIT License | 5 votes
def train(self, trainset, devset, testset, batch_size=64, epochs=50, shuffle=True):
    self.logger.info('Start training...')
    init_lr = self.cfg.lr  # initial learning rate, used for decay learning rate
    best_score = 0.0  # record the best score
    best_score_epoch = 1  # record the epoch of the best score obtained
    no_imprv_epoch = 0  # no improvement patience counter
    for epoch in range(self.start_epoch, epochs + 1):
        self.logger.info('Epoch %2d/%2d:' % (epoch, epochs))
        progbar = Progbar(target=(len(trainset) + batch_size - 1) // batch_size)  # number of batches
        if shuffle:
            np.random.shuffle(trainset)  # shuffle training dataset each epoch
        # training each epoch
        for i, (words, labels) in enumerate(batch_iter(trainset, batch_size)):
            feed_dict = self._get_feed_dict(words, labels, lr=self.cfg.lr, is_train=True)
            _, train_loss = self.sess.run([self.train_op, self.loss], feed_dict=feed_dict)
            progbar.update(i + 1, [("train loss", train_loss)])
        if devset is not None:
            self.evaluate(devset, batch_size)
        cur_score = self.evaluate(testset, batch_size, is_devset=False)
        # learning rate decay
        if self.cfg.decay_lr:
            self.cfg.lr = init_lr / (1 + self.cfg.lr_decay * epoch)
        # performs model saving and evaluating on test dataset
        if cur_score > best_score:
            no_imprv_epoch = 0
            self.save_session(epoch)
            best_score = cur_score
            best_score_epoch = epoch
            self.logger.info(' -- new BEST score on TEST dataset: {:05.3f}'.format(best_score))
        else:
            no_imprv_epoch += 1
            if no_imprv_epoch >= self.cfg.no_imprv_patience:
                self.logger.info('early stop at {}th epoch without improvement for {} epochs, BEST score: '
                                 '{:05.3f} at epoch {}'.format(epoch, no_imprv_epoch, best_score, best_score_epoch))
                break
    self.logger.info('Training process done...')
Example #7
Source File: model.py From Dense_BiLSTM with MIT License | 5 votes
def evaluate(self, dataset, batch_size, is_devset=True):
    accuracies = []
    for words, labels in batch_iter(dataset, batch_size):
        feed_dict = self._get_feed_dict(words, labels, lr=None, is_train=False)
        accuracy = self.sess.run(self.accuracy, feed_dict=feed_dict)
        accuracies.append(accuracy)
    acc = np.mean(accuracies) * 100
    self.logger.info("Testing model over {} dataset: accuracy - {:05.3f}".format(
        'DEVELOPMENT' if is_devset else 'TEST', acc))
    return acc
Example #8
Source File: utils_test.py From learning-from-human-preferences with MIT License | 5 votes
def test_batch_iter_2(self):
    """
    Check that shuffle=True returns the same data but in a different order.
    """
    expected_data = list(range(16))
    actual_data = []
    for x in batch_iter(expected_data, batch_size=4, shuffle=True):
        actual_data.extend(x)
    self.assertEqual(len(actual_data), len(expected_data))
    self.assertEqual(set(actual_data), set(expected_data))
    with self.assertRaises(AssertionError):
        np.testing.assert_array_equal(actual_data, expected_data)
Example #9
Source File: utils_test.py From learning-from-human-preferences with MIT License | 5 votes
def test_batch_iter_3(self):
    """
    Check that successive calls shuffle in a different order.
    """
    data = list(range(16))
    out1 = []
    for x in batch_iter(data, batch_size=4, shuffle=True):
        out1.extend(x)
    out2 = []
    for x in batch_iter(data, batch_size=4, shuffle=True):
        out2.extend(x)
    self.assertEqual(set(out1), set(out2))
    with self.assertRaises(AssertionError):
        np.testing.assert_array_equal(out1, out2)