Python tensorflow.contrib.eager.Iterator() Examples
The following are 17 code examples of tensorflow.contrib.eager.Iterator(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module tensorflow.contrib.eager.
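Before the examples, here is a minimal sketch of what tfe.Iterator does, assuming TensorFlow 1.x (where tf.contrib is still available) with eager execution enabled: it wraps a tf.data.Dataset so it can be consumed with an ordinary Python for-loop, yielding eager tensors instead of requiring a session and get_next().

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()

# A toy dataset; any tf.data.Dataset can be wrapped the same way.
dataset = tf.data.Dataset.from_tensor_slices([1.0, 2.0, 3.0, 4.0])

# tfe.Iterator makes the dataset iterable with a plain Python for-loop,
# yielding EagerTensors directly instead of a session-run get_next() op.
for element in tfe.Iterator(dataset):
    print(element.numpy())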
Example #1
Source File: dataset.py From VAE-Tensorflow with MIT License

def _bulid(self, dataset, sess=None):
    self._dataset = dataset

    if self._is_eager:
        self._eager_iterator = tfe.Iterator(dataset)
    else:
        self._iterator = dataset.make_initializable_iterator()
        self._batch_op = self._iterator.get_next()
        if sess:
            self._sess = sess
        else:
            self._sess = session()

    try:
        self.reset()
    except:
        pass
Example #2
Source File: 04_word2vec_eager.py From stanford-tensorflow-tutorials with MIT License

def main():
    dataset = tf.data.Dataset.from_generator(gen,
                                (tf.int32, tf.int32),
                                (tf.TensorShape([BATCH_SIZE]), tf.TensorShape([BATCH_SIZE, 1])))
    optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
    model = Word2Vec(vocab_size=VOCAB_SIZE, embed_size=EMBED_SIZE)
    grad_fn = tfe.implicit_value_and_gradients(model.compute_loss)
    total_loss = 0.0  # for average loss in the last SKIP_STEP steps
    num_train_steps = 0
    while num_train_steps < NUM_TRAIN_STEPS:
        for center_words, target_words in tfe.Iterator(dataset):
            if num_train_steps >= NUM_TRAIN_STEPS:
                break
            loss_batch, grads = grad_fn(center_words, target_words)
            total_loss += loss_batch
            optimizer.apply_gradients(grads)
            if (num_train_steps + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(
                    num_train_steps, total_loss / SKIP_STEP))
                total_loss = 0.0
            num_train_steps += 1
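The loop above relies on tfe.implicit_value_and_gradients, which wraps a loss function and returns both the loss value and a list of (gradient, variable) pairs ready for Optimizer.apply_gradients. A minimal sketch of that pattern on a toy loss (the variable and constants here are illustrative, not from the example):

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()

w = tfe.Variable(3.0)

def loss_fn(x):
    return (w * x - 1.0) ** 2

# grad_fn(x) returns (loss value, [(gradient, variable), ...]) for every
# trainable variable touched while computing loss_fn(x).
grad_fn = tfe.implicit_value_and_gradients(loss_fn)
loss, grads_and_vars = grad_fn(tf.constant(2.0))

tf.train.GradientDescentOptimizer(0.1).apply_gradients(grads_and_vars)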
Example #3
Source File: mnist_eager.py From models with Apache License 2.0

def test(model, dataset):
    """Perform an evaluation of `model` on the examples from `dataset`."""
    avg_loss = tfe.metrics.Mean('loss')
    accuracy = tfe.metrics.Accuracy('accuracy')

    for (images, labels) in tfe.Iterator(dataset):
        logits = model(images, training=False)
        avg_loss(loss(logits, labels))
        accuracy(
            tf.argmax(logits, axis=1, output_type=tf.int64),
            tf.cast(labels, tf.int64))
    print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
          (avg_loss.result(), 100 * accuracy.result()))
    with tf.contrib.summary.always_record_summaries():
        tf.contrib.summary.scalar('loss', avg_loss.result())
        tf.contrib.summary.scalar('accuracy', accuracy.result())
Example #4
Source File: mnist_eager.py From models with Apache License 2.0

def train(model, optimizer, dataset, step_counter, log_interval=None):
    """Trains model on `dataset` using `optimizer`."""
    start = time.time()
    for (batch, (images, labels)) in enumerate(tfe.Iterator(dataset)):
        with tf.contrib.summary.record_summaries_every_n_global_steps(
                10, global_step=step_counter):
            # Record the operations used to compute the loss given the input,
            # so that the gradient of the loss with respect to the variables
            # can be computed.
            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                loss_value = loss(logits, labels)
                tf.contrib.summary.scalar('loss', loss_value)
                tf.contrib.summary.scalar('accuracy', compute_accuracy(logits, labels))
            grads = tape.gradient(loss_value, model.variables)
            optimizer.apply_gradients(
                zip(grads, model.variables), global_step=step_counter)
            if log_interval and batch % log_interval == 0:
                rate = log_interval / (time.time() - start)
                print('Step #%d\tLoss: %.6f (%d steps/sec)' % (batch, loss_value, rate))
                start = time.time()
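Unlike Examples #2 and #14, this loop records gradients explicitly with tf.GradientTape: operations inside the with block are recorded, and tape.gradient replays them to compute derivatives. A minimal sketch of the same pattern on a toy variable (names and values are illustrative):

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()

w = tfe.Variable(2.0)

with tf.GradientTape() as tape:
    # Every operation on a watched variable is recorded on the tape.
    loss_value = (w * 3.0 - 6.0) ** 2

# Replay the tape to get d(loss)/d(w), then apply a descent step.
grad = tape.gradient(loss_value, w)
tf.train.GradientDescentOptimizer(0.1).apply_gradients([(grad, w)])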
Example #5
Source File: 9_w2v_eager.py From deep-learning-note with MIT License

def main():
    dataset = tf.data.Dataset.from_generator(gen,
                                (tf.int32, tf.int32),
                                (tf.TensorShape([BATCH_SIZE]), tf.TensorShape([BATCH_SIZE, 1])))
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(LEARNING_RATE)
    model = Word2Vec(vocab_size=VOCAB_SIZE, embed_size=EMBED_SIZE)
    grad_fn = tfe.implicit_value_and_gradients(model.compute_loss)
    total_loss = 0.0
    num_train_steps = 0
    while num_train_steps < NUM_TRAIN_STEPS:
        for center_words, target_words in tfe.Iterator(dataset):
            if num_train_steps >= NUM_TRAIN_STEPS:
                break
            loss_batch, grads = grad_fn(center_words, target_words)
            total_loss += loss_batch
            optimizer.apply_gradients(grads)
            if (num_train_steps + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(
                    num_train_steps, total_loss / SKIP_STEP))
                total_loss = 0.0
            num_train_steps += 1
Example #6
Source File: dataset.py From STGAN with MIT License

def _bulid(self, dataset, sess=None):
    self._dataset = dataset

    if self._is_eager:
        self._eager_iterator = tfe.Iterator(dataset)
    else:
        self._iterator = dataset.make_initializable_iterator()
        self._batch_op = self._iterator.get_next()
        if sess:
            self._sess = sess
        else:
            self._sess = session()

    try:
        self.reset()
    except:
        pass
Example #7
Source File: mnist_eager.py From dockerfiles with Apache License 2.0

def train_one_epoch(model, optimizer, dataset, log_interval=None):
    """Trains model on `dataset` using `optimizer`."""
    tf.train.get_or_create_global_step()

    def model_loss(labels, images):
        prediction = model(images, training=True)
        loss_value = loss(prediction, labels)
        tf.contrib.summary.scalar('loss', loss_value)
        tf.contrib.summary.scalar('accuracy', compute_accuracy(prediction, labels))
        return loss_value

    for (batch, (images, labels)) in enumerate(tfe.Iterator(dataset)):
        with tf.contrib.summary.record_summaries_every_n_global_steps(10):
            batch_model_loss = functools.partial(model_loss, labels, images)
            optimizer.minimize(
                batch_model_loss, global_step=tf.train.get_global_step())
            if log_interval and batch % log_interval == 0:
                print('Batch #%d\tLoss: %.6f' % (batch, batch_model_loss()))
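Note that in eager mode Optimizer.minimize expects a no-argument callable that recomputes the loss rather than a loss tensor, which is why the batch is bound in with functools.partial. A minimal sketch of that pattern on a toy model (names and values are illustrative):

import functools
import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()

w = tfe.Variable(1.0)

def model_loss(target, x):
    return (w * x - target) ** 2

optimizer = tf.train.GradientDescentOptimizer(0.1)

# Bind the current batch into a zero-argument loss callable; minimize()
# calls it to compute the loss, then computes and applies the gradients.
batch_loss = functools.partial(model_loss, tf.constant(4.0), tf.constant(2.0))
optimizer.minimize(batch_loss)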
Example #8
Source File: test.py From imagenet with MIT License

def test(self, mode):
    """ Testing procedure

    Args:
        mode: string, 'validation' or 'test', choose which set to test
    """
    test_examples = self.testset.dataset_size
    total_top1_accuracy = 0.
    total_topk_accuracy = 0.

    for (ex_i, (images, label)) in enumerate(tfe.Iterator(self.testset.dataset)):
        top_1_a = self.top_1_accuracy(images, label)
        top_k_a = self.top_k_accuracy(images, label)
        total_top1_accuracy += top_1_a
        total_topk_accuracy += top_k_a
        if (ex_i % self.cfg.DISPLAY_STEP) == 0:
            print('Examples done: {:5d}/{} ---- Top-1: {:.4f} -- Top-{}: {:.4f}'.format(
                ex_i + 1, test_examples,
                total_top1_accuracy / (ex_i + 1),
                self.cfg.TOP_K, total_topk_accuracy / (ex_i + 1)))

    print('---- Final accuracy ----')
    print('Top-1: {:.4f} -- Top-{}: {:.4f}'.format(
        total_top1_accuracy / test_examples,
        self.cfg.TOP_K, total_topk_accuracy / test_examples))
    print('Top-1 error rate: {:.4f} -- Top-{} error rate: {:.4f}'.format(
        1 - (total_top1_accuracy / test_examples),
        self.cfg.TOP_K, 1 - (total_topk_accuracy / test_examples)))
Example #9
Source File: train.py From imagenet with MIT License

def train(self):
    """ Training procedure """
    start_time = time.time()
    step_time = 0.0

    with self.writer.as_default():
        with tf.contrib.summary.record_summaries_every_n_global_steps(self.cfg.DISPLAY_STEP):
            for e in range(self.epoch.numpy(), self.cfg.EPOCHS):
                tf.assign(self.epoch, e)
                for (batch_i, (images, labels)) in enumerate(tfe.Iterator(self.trainingset.dataset)):
                    self.global_step = tf.train.get_global_step()
                    step = self.global_step.numpy() + 1

                    step_start_time = int(round(time.time() * 1000))

                    self.optimizer.minimize(
                        lambda: self.loss('train', images, labels),
                        global_step=self.global_step)

                    step_end_time = int(round(time.time() * 1000))
                    step_time += step_end_time - step_start_time

                    if (step % self.cfg.DISPLAY_STEP) == 0:
                        l = self.loss('train', images, labels)
                        a = self.accuracy('train', images, labels).numpy()
                        print('Epoch: {:03d} Step/Batch: {:09d} Step mean time: {:04d}ms \n'
                              'Loss: {:.7f} Training accuracy: {:.4f}'.format(
                                  e, step, int(step_time / step), l, a))

                    if (step % self.cfg.VALIDATION_STEP) == 0:
                        val_images, val_labels = tfe.Iterator(self.valset.dataset).next()
                        l = self.loss('val', val_images, val_labels)
                        a = self.accuracy('val', val_images, val_labels).numpy()
                        int_time = time.time() - start_time
                        print('Elapsed time: {} --- Loss: {:.7f} Validation accuracy: {:.4f}'.format(
                            ut.format_time(int_time), l, a))

                    if (step % self.cfg.SAVE_STEP) == 0:
                        tfe.Saver(self.all_variables).save(
                            os.path.join(self.cfg.CKPT_PATH, 'net.ckpt'),
                            global_step=self.global_step)
                        print('Variables saved')
Example #10
Source File: dataset.py From VAE-Tensorflow with MIT License

def reset(self, feed_dict={}):
    if self._is_eager:
        self._eager_iterator = tfe.Iterator(self._dataset)
    else:
        self._sess.run(self._iterator.initializer, feed_dict=feed_dict)
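An eager tfe.Iterator has no initializer to re-run; as the method above shows, the eager-mode equivalent of resetting is simply constructing a fresh iterator over the same dataset. A minimal sketch:

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()

dataset = tf.data.Dataset.range(3)

iterator = tfe.Iterator(dataset)
print([t.numpy() for t in iterator])  # consumes the dataset: [0, 1, 2]

iterator = tfe.Iterator(dataset)      # "reset": a new iterator starts over
print(iterator.next().numpy())        # 0 again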
Example #11
Source File: eager.py From -Learn-Artificial-Intelligence-with-TensorFlow with MIT License

def run_train_epoch(self, dataset):
    num_correct_total = 0
    for (x, y) in tfe.Iterator(dataset):
        self.run_train_step(x, y)
        num_correct_total += self.num_correct
    return num_correct_total
Example #12
Source File: dataset.py From STGAN with MIT License

def reset(self, feed_dict={}):
    if self._is_eager:
        self._eager_iterator = tfe.Iterator(self._dataset)
    else:
        self._sess.run(self._iterator.initializer, feed_dict=feed_dict)
Example #13
Source File: mnist_eager.py From dockerfiles with Apache License 2.0

def test(model, dataset):
    """Perform an evaluation of `model` on the examples from `dataset`."""
    avg_loss = tfe.metrics.Mean('loss')
    accuracy = tfe.metrics.Accuracy('accuracy')

    for (images, labels) in tfe.Iterator(dataset):
        predictions = model(images, training=False)
        avg_loss(loss(predictions, labels))
        accuracy(tf.argmax(predictions, axis=1, output_type=tf.int64),
                 tf.argmax(labels, axis=1, output_type=tf.int64))

    print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
          (avg_loss.result(), 100 * accuracy.result()))
    with tf.contrib.summary.always_record_summaries():
        tf.contrib.summary.scalar('loss', avg_loss.result())
        tf.contrib.summary.scalar('accuracy', accuracy.result())
Example #14
Source File: 04_linreg_eager.py From stanford-tensorflow-tutorials with MIT License

def train(loss_fn):
    """Train a regression model evaluated using `loss_fn`."""
    print('Training; loss function: ' + loss_fn.__name__)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

    # Define the function through which to differentiate.
    def loss_for_example(x, y):
        return loss_fn(y, prediction(x))

    # `grad_fn(x_i, y_i)` returns (1) the value of `loss_for_example`
    # evaluated at `x_i`, `y_i` and (2) the gradients of any variables used in
    # calculating it.
    grad_fn = tfe.implicit_value_and_gradients(loss_for_example)

    start = time.time()
    for epoch in range(100):
        total_loss = 0.0
        for x_i, y_i in tfe.Iterator(dataset):
            loss, gradients = grad_fn(x_i, y_i)
            # Take an optimization step and update variables.
            optimizer.apply_gradients(gradients)
            total_loss += loss
        if epoch % 10 == 0:
            print('Epoch {0}: {1}'.format(epoch, total_loss / n_samples))
    print('Took: %f seconds' % (time.time() - start))
    print('Eager execution exhibits significant overhead per operation. '
          'As you increase your batch size, the impact of the overhead will '
          'become less noticeable. Eager execution is under active development: '
          'expect performance to increase substantially in the near future!')
Example #15
Source File: 04_linreg_eager_starter.py From stanford-tensorflow-tutorials with MIT License

def train(loss_fn):
    """Train a regression model evaluated using `loss_fn`."""
    print('Training; loss function: ' + loss_fn.__name__)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

    # Define the function through which to differentiate.
    #############################
    ########## TO DO ############
    #############################
    def loss_for_example(x, y):
        pass

    # Obtain a gradients function using `tfe.implicit_value_and_gradients`.
    #############################
    ########## TO DO ############
    #############################
    grad_fn = None

    start = time.time()
    for epoch in range(100):
        total_loss = 0.0
        for x_i, y_i in tfe.Iterator(dataset):
            # Compute the loss and gradient, and take an optimization step.
            #############################
            ########## TO DO ############
            #############################
            optimizer.apply_gradients(gradients)
            total_loss += loss
        if epoch % 10 == 0:
            print('Epoch {0}: {1}'.format(epoch, total_loss / n_samples))
    print('Took: %f seconds' % (time.time() - start))
    print('Eager execution exhibits significant overhead per operation. '
          'As you increase your batch size, the impact of the overhead will '
          'become less noticeable. Eager execution is under active development: '
          'expect performance to increase substantially in the near future!')
Example #16
Source File: 04_word2vec_eager_starter.py From stanford-tensorflow-tutorials with MIT License

def main():
    dataset = tf.data.Dataset.from_generator(gen,
                                (tf.int32, tf.int32),
                                (tf.TensorShape([BATCH_SIZE]), tf.TensorShape([BATCH_SIZE, 1])))
    optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)

    # Create the model
    #############################
    ########## TO DO ############
    #############################
    model = None

    # Create the gradients function, using `tfe.implicit_value_and_gradients`
    #############################
    ########## TO DO ############
    #############################
    grad_fn = None

    total_loss = 0.0  # for average loss in the last SKIP_STEP steps
    num_train_steps = 0
    while num_train_steps < NUM_TRAIN_STEPS:
        for center_words, target_words in tfe.Iterator(dataset):
            if num_train_steps >= NUM_TRAIN_STEPS:
                break
            # Compute the loss and gradients, and take an optimization step.
            #############################
            ########## TO DO ############
            #############################
            if (num_train_steps + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(
                    num_train_steps, total_loss / SKIP_STEP))
                total_loss = 0.0
            num_train_steps += 1
Example #17
Source File: TFRecordsStatistics.py From DeepDenoiser with Apache License 2.0

def _dataset_iterator(self, group_by_samples_per_pixel, source_samples_per_pixel_list):
    directory = os.path.join(self.tfrecords_creator.base_tfrecords_directory, self.tfrecords_creator.name)
    if group_by_samples_per_pixel:
        assert len(source_samples_per_pixel_list) == 1
        directory = os.path.join(directory, str(source_samples_per_pixel_list[0]))
    files = tf.data.Dataset.list_files(directory + '/*')

    threads = multiprocessing.cpu_count()
    dataset = tf.data.TFRecordDataset(files, compression_type='GZIP', buffer_size=None, num_parallel_reads=threads)

    def _feature_parser(serialized_example):
        features = {}
        for samples_per_pixel in source_samples_per_pixel_list:
            for source_index in range(self.tfrecords_creator.number_of_sources_per_example):
                for source_render_pass in self.tfrecords_creator.source_render_passes_usage.render_passes():
                    indexed_source_feature_name = Naming.source_feature_name(
                        source_render_pass, samples_per_pixel=samples_per_pixel, index=source_index)
                    features[indexed_source_feature_name] = tf.FixedLenFeature([], tf.string)
        for target_render_pass in self.tfrecords_creator.target_render_passes_usage.render_passes():
            features[Naming.target_feature_name(target_render_pass)] = tf.FixedLenFeature([], tf.string)

        parsed_features = tf.parse_single_example(serialized_example, features)

        source_features = {}
        for samples_per_pixel in source_samples_per_pixel_list:
            for source_index in range(self.tfrecords_creator.number_of_sources_per_example):
                for source_render_pass in self.tfrecords_creator.source_render_passes_usage.render_passes():
                    indexed_source_feature_name = Naming.source_feature_name(
                        source_render_pass, samples_per_pixel=samples_per_pixel, index=source_index)
                    source_feature = tf.decode_raw(parsed_features[indexed_source_feature_name], tf.float32)
                    number_of_channels = RenderPasses.number_of_channels(source_render_pass)
                    source_feature = tf.reshape(
                        source_feature,
                        [self.tfrecords_creator.tiles_height_width,
                         self.tfrecords_creator.tiles_height_width,
                         number_of_channels])
                    source_features[indexed_source_feature_name] = source_feature

        target_features = {}
        for target_render_pass in self.tfrecords_creator.target_render_passes_usage.render_passes():
            target_feature = tf.decode_raw(parsed_features[Naming.target_feature_name(target_render_pass)], tf.float32)
            number_of_channels = RenderPasses.number_of_channels(target_render_pass)
            target_feature = tf.reshape(
                target_feature,
                [self.tfrecords_creator.tiles_height_width,
                 self.tfrecords_creator.tiles_height_width,
                 number_of_channels])
            target_features[Naming.target_feature_name(target_render_pass)] = target_feature

        return source_features, target_features

    dataset = dataset.map(map_func=_feature_parser, num_parallel_calls=threads)
    iterator = tfe.Iterator(dataset)
    return iterator
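The parser above follows a standard TFRecord pattern: declare each feature as a fixed-length byte string, parse the example, decode the bytes back to float32, and reshape to the known tile layout. A stripped-down sketch of that pattern (the feature name and shape here are illustrative, not from the project):

import tensorflow as tf

def parse_example(serialized_example):
    # Each tensor is stored as one raw byte string per example.
    features = {'image': tf.FixedLenFeature([], tf.string)}
    parsed = tf.parse_single_example(serialized_example, features)
    # Decode the bytes back to float32 and restore the known shape.
    image = tf.decode_raw(parsed['image'], tf.float32)
    return tf.reshape(image, [64, 64, 3])  # height, width, channels (assumed)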