Python reader.ptb_raw_data() Examples
The following are 20 code examples of reader.ptb_raw_data().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module reader, or try the search function.
Example #1
Source File: ptb.py From tensor-fsmn with MIT License | 6 votes |
def main(data_path="", num_epochs=13):
    """Train a PTBModel on the PTB corpus and report perplexities per epoch.

    Args:
        data_path: directory passed to reader.ptb_raw_data (default "" keeps
            the original hard-coded behavior).
        num_epochs: number of training epochs (default 13, the original
            hard-coded value).
    """
    # Fix: the original write lacked "\n", so the first epoch line was
    # appended directly after "start ptb" on the same output line.
    sys.stdout.write("start ptb\n")
    raw_data = reader.ptb_raw_data(data_path)
    train_data, valid_data, test_data, word_to_id = raw_data
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-0.04, 0.04)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            model = PTBModel()
        saver = tf.train.Saver()
        tf.initialize_all_variables().run()
        model.train_writer = tf.train.SummaryWriter('./train',
                                                    graph=session.graph)
        for i in range(num_epochs):
            epoch = i + 1
            sys.stdout.write("Epoch: %d\n" % epoch)
            train_perplexity = model.train(session, train_data)
            sys.stdout.write("Epoch: %d Train Perplexity: %.3f\n"
                             % (epoch, train_perplexity))
            valid_perplexity = model.evaluate(session, valid_data)
            sys.stdout.write("Epoch: %d Valid Perplexity: %.3f\n"
                             % (epoch, valid_perplexity))
            test_perplexity = model.evaluate(session, test_data)
            sys.stdout.write("Epoch: %d Test Perplexity: %.3f\n"
                             % (epoch, test_perplexity))
        # model.predict(session, test_data, word_to_id)
        saver.save(session, 'model.ckpt')
Example #2
Source File: reader_test.py From hands-detection with MIT License | 5 votes |
def testPtbRawData(self):
    """Smoke test: ptb_raw_data yields a 4-tuple for a tiny temp corpus."""
    tmp_dir = tf.test.get_temp_dir()
    # The reader expects ptb.train.txt / ptb.valid.txt / ptb.test.txt here.
    for split in ("train", "valid", "test"):
        split_path = os.path.join(tmp_dir, "ptb.%s.txt" % split)
        with tf.gfile.GFile(split_path, "w") as handle:
            handle.write(self._string_data)
    # Expect (train_ids, valid_ids, test_ids, vocabulary).
    self.assertEqual(4, len(reader.ptb_raw_data(tmp_dir)))
Example #3
Source File: reader_test.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def testPtbRawData(self):
    """Smoke test that ptb_raw_data returns four components."""
    data_dir = tf.test.get_temp_dir()
    # Write the same tiny corpus as all three expected split files.
    for part in ("train", "valid", "test"):
        target = os.path.join(data_dir, "ptb.%s.txt" % part)
        with tf.gfile.GFile(target, "w") as out:
            out.write(self._string_data)
    result = reader.ptb_raw_data(data_dir)
    self.assertEqual(4, len(result))
Example #4
Source File: reader_test.py From HumanRecognition with MIT License | 5 votes |
def testPtbRawData(self):
    """ptb_raw_data on a minimal dataset should return a 4-tuple."""
    base_dir = tf.test.get_temp_dir()
    for corpus_split in ("train", "valid", "test"):
        # One identical tiny file per split keeps the reader happy.
        fname = os.path.join(base_dir, "ptb.%s.txt" % corpus_split)
        with tf.gfile.GFile(fname, "w") as sink:
            sink.write(self._string_data)
    parsed = reader.ptb_raw_data(base_dir)
    self.assertEqual(4, len(parsed))
Example #5
Source File: reader_test.py From object_detection_with_tensorflow with MIT License | 5 votes |
def testPtbRawData(self):
    """Smoke-test reader.ptb_raw_data against a throwaway corpus dir."""
    workdir = tf.test.get_temp_dir()
    # Materialize ptb.{train,valid,test}.txt from the fixture string.
    for tag in ("train", "valid", "test"):
        with tf.gfile.GFile(os.path.join(workdir, "ptb.%s.txt" % tag),
                            "w") as writer:
            writer.write(self._string_data)
    loaded = reader.ptb_raw_data(workdir)
    self.assertEqual(4, len(loaded))
Example #6
Source File: reader_test.py From object_detection_kitti with Apache License 2.0 | 5 votes |
def testPtbRawData(self):
    """Verify ptb_raw_data returns exactly four pieces of data."""
    scratch = tf.test.get_temp_dir()
    splits = ("train", "valid", "test")
    for name in splits:
        # Each split file carries the same small fixture text.
        path = os.path.join(scratch, "ptb.%s.txt" % name)
        with tf.gfile.GFile(path, "w") as fp:
            fp.write(self._string_data)
    self.assertEqual(4, len(reader.ptb_raw_data(scratch)))
Example #7
Source File: reader_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 5 votes |
def testPtbRawData(self):
    """Smoke test of the full raw-data loading path."""
    corpus_dir = tf.test.get_temp_dir()
    for section in ("train", "valid", "test"):
        section_file = os.path.join(corpus_dir, "ptb.%s.txt" % section)
        # GFile keeps this portable across local and remote filesystems.
        with tf.gfile.GFile(section_file, "w") as stream:
            stream.write(self._string_data)
    pieces = reader.ptb_raw_data(corpus_dir)
    self.assertEqual(4, len(pieces))
Example #8
Source File: reader_test.py From ActionRecognition with MIT License | 5 votes |
def testPtbRawData(self):
    """ptb_raw_data must produce train/valid/test ids plus a vocabulary."""
    tempdir = tf.test.get_temp_dir()
    for kind in ("train", "valid", "test"):
        dest = os.path.join(tempdir, "ptb.%s.txt" % kind)
        with tf.gfile.GFile(dest, "w") as f:
            f.write(self._string_data)
    # Only the arity is checked here; content is covered elsewhere.
    self.assertEqual(4, len(reader.ptb_raw_data(tempdir)))
Example #9
Source File: rhn_train.py From iss-rnns with Apache License 2.0 | 5 votes |
def get_data(data_path, dataset):
    """Load raw data for the named dataset.

    Args:
        data_path: directory handed to the dataset-specific loader.
        dataset: one of 'ptb', 'enwik8' or 'text8'.

    Returns:
        A (reader_module, raw_data) pair, where reader_module is the module
        used to load the data (callers use its batching helpers too).

    Raises:
        ValueError: if dataset is not a recognized name. (Previously an
        unknown name fell through every branch and crashed later with
        UnboundLocalError on `raw_data`.)
    """
    if dataset == 'ptb':
        import reader
        raw_data = reader.ptb_raw_data(data_path)
    elif dataset == 'enwik8':
        from data import reader
        raw_data = reader.enwik8_raw_data(data_path)
    elif dataset == 'text8':
        from data import reader
        raw_data = reader.text8_raw_data(data_path)
    else:
        raise ValueError(
            "unknown dataset %r (expected 'ptb', 'enwik8' or 'text8')"
            % dataset)
    return reader, raw_data
Example #10
Source File: reader_test.py From iss-rnns with Apache License 2.0 | 5 votes |
def testPtbRawData(self):
    """Smoke test: loading a tiny synthetic corpus returns 4 items."""
    sandbox = tf.test.get_temp_dir()
    for label in ("train", "valid", "test"):
        out_path = os.path.join(sandbox, "ptb.%s.txt" % label)
        with tf.gfile.GFile(out_path, "w") as gf:
            gf.write(self._string_data)
    bundle = reader.ptb_raw_data(sandbox)
    # train ids, valid ids, test ids, vocabulary size/map.
    self.assertEqual(4, len(bundle))
Example #11
Source File: reader_test.py From Gun-Detector with Apache License 2.0 | 5 votes |
def testPtbRawData(self):
    """End-to-end smoke test of reader.ptb_raw_data."""
    root = tf.test.get_temp_dir()
    # The loader globs for the three canonical split filenames.
    for stage in ("train", "valid", "test"):
        with tf.gfile.GFile(os.path.join(root, "ptb.%s.txt" % stage),
                            "w") as dst:
            dst.write(self._string_data)
    self.assertEqual(4, len(reader.ptb_raw_data(root)))
Example #12
Source File: reader_test.py From yolo_v2 with Apache License 2.0 | 5 votes |
def testPtbRawData(self):
    """Check the shape of ptb_raw_data's return on a minimal dataset."""
    tdir = tf.test.get_temp_dir()
    for piece in ("train", "valid", "test"):
        piece_path = os.path.join(tdir, "ptb.%s.txt" % piece)
        with tf.gfile.GFile(piece_path, "w") as ofile:
            ofile.write(self._string_data)
    returned = reader.ptb_raw_data(tdir)
    self.assertEqual(4, len(returned))
Example #13
Source File: reader_test.py From DOTA_models with Apache License 2.0 | 5 votes |
def testPtbRawData(self):
    """Smoke test — tiny fixture corpus in, 4-tuple out."""
    loc = tf.test.get_temp_dir()
    for subset in ("train", "valid", "test"):
        subset_file = os.path.join(loc, "ptb.%s.txt" % subset)
        # Same fixture text for every subset; only arity is asserted.
        with tf.gfile.GFile(subset_file, "w") as w:
            w.write(self._string_data)
    self.assertEqual(4, len(reader.ptb_raw_data(loc)))
Example #14
Source File: ptb_word_lm.py From hands-detection with MIT License | 4 votes |
def main(_):
    """Build train/valid/test PTB models, train under a Supervisor, and
    report perplexities.

    NOTE(review): the source was collapsed onto a single line; block
    structure below is reconstructed to match the canonical TensorFlow
    PTB tutorial — confirm against upstream.
    """
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")
    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data
    config = get_config()
    eval_config = get_config()
    # Evaluation feeds one token at a time so RNN state carries exactly.
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.name_scope("Train"):
            train_input = PTBInput(config=config, data=train_data,
                                   name="TrainInput")
            with tf.variable_scope("Model", reuse=None,
                                   initializer=initializer):
                m = PTBModel(is_training=True, config=config,
                             input_=train_input)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Learning Rate", m.lr)
        with tf.name_scope("Valid"):
            valid_input = PTBInput(config=config, data=valid_data,
                                   name="ValidInput")
            # reuse=True shares weights with the training model.
            with tf.variable_scope("Model", reuse=True,
                                   initializer=initializer):
                mvalid = PTBModel(is_training=False, config=config,
                                  input_=valid_input)
            tf.summary.scalar("Validation Loss", mvalid.cost)
        with tf.name_scope("Test"):
            test_input = PTBInput(config=eval_config, data=test_data,
                                  name="TestInput")
            with tf.variable_scope("Model", reuse=True,
                                   initializer=initializer):
                mtest = PTBModel(is_training=False, config=eval_config,
                                 input_=test_input)
        sv = tf.train.Supervisor(logdir=FLAGS.save_path)
        with sv.managed_session() as session:
            for i in range(config.max_max_epoch):
                # Learning rate stays flat for max_epoch epochs, then decays.
                lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch,
                                                  0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)
                print("Epoch: %d Learning rate: %.3f"
                      % (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                             verbose=True)
                print("Epoch: %d Train Perplexity: %.3f"
                      % (i + 1, train_perplexity))
                valid_perplexity = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f"
                      % (i + 1, valid_perplexity))
            test_perplexity = run_epoch(session, mtest)
            print("Test Perplexity: %.3f" % test_perplexity)
            if FLAGS.save_path:
                print("Saving model to %s." % FLAGS.save_path)
                sv.saver.save(session, FLAGS.save_path,
                              global_step=sv.global_step)
Example #15
Source File: ptb_word_lm.py From DOTA_models with Apache License 2.0 | 4 votes |
def main(_):
    """Train the PTB word language model and print per-epoch perplexities.

    NOTE(review): the source was collapsed onto a single line; nesting
    below is reconstructed to match the canonical TensorFlow PTB
    tutorial — confirm against upstream.
    """
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")
    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data
    config = get_config()
    eval_config = get_config()
    # Test-time config processes one token per step.
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.name_scope("Train"):
            train_input = PTBInput(config=config, data=train_data,
                                   name="TrainInput")
            with tf.variable_scope("Model", reuse=None,
                                   initializer=initializer):
                m = PTBModel(is_training=True, config=config,
                             input_=train_input)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Learning Rate", m.lr)
        with tf.name_scope("Valid"):
            valid_input = PTBInput(config=config, data=valid_data,
                                   name="ValidInput")
            # Validation/test models reuse the training variables.
            with tf.variable_scope("Model", reuse=True,
                                   initializer=initializer):
                mvalid = PTBModel(is_training=False, config=config,
                                  input_=valid_input)
            tf.summary.scalar("Validation Loss", mvalid.cost)
        with tf.name_scope("Test"):
            test_input = PTBInput(config=eval_config, data=test_data,
                                  name="TestInput")
            with tf.variable_scope("Model", reuse=True,
                                   initializer=initializer):
                mtest = PTBModel(is_training=False, config=eval_config,
                                 input_=test_input)
        sv = tf.train.Supervisor(logdir=FLAGS.save_path)
        with sv.managed_session() as session:
            for i in range(config.max_max_epoch):
                # Exponential decay kicks in after max_epoch epochs.
                lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch,
                                                  0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)
                print("Epoch: %d Learning rate: %.3f"
                      % (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                             verbose=True)
                print("Epoch: %d Train Perplexity: %.3f"
                      % (i + 1, train_perplexity))
                valid_perplexity = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f"
                      % (i + 1, valid_perplexity))
            test_perplexity = run_epoch(session, mtest)
            print("Test Perplexity: %.3f" % test_perplexity)
            if FLAGS.save_path:
                print("Saving model to %s." % FLAGS.save_path)
                sv.saver.save(session, FLAGS.save_path,
                              global_step=sv.global_step)
Example #16
Source File: train.py From bit-rnn with Apache License 2.0 | 4 votes |
def main(_):
    """Train a bit-quantized PTB model with a three-stage step learning
    rate, then evaluate on the test set.

    NOTE(review): source was collapsed onto one line; loop/scope nesting
    is reconstructed — confirm against the upstream bit-rnn repo.
    """
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")
    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data
    config = get_config()
    eval_config = get_config()
    # Single-token steps for exact evaluation.
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.initializers.variance_scaling(
            distribution='uniform')
        with tf.variable_scope("model", reuse=tf.AUTO_REUSE,
                               initializer=initializer):
            m = PTBModel(is_training=True, config=config)
        # Eval models share the training model's variables.
        with tf.variable_scope("model", reuse=True,
                               initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config)
            mtest = PTBModel(is_training=False, config=eval_config)
        tf.global_variables_initializer().run()

        def get_learning_rate(epoch, config):
            # Step schedule: base LR, then /10, then /100.
            base_lr = config.learning_rate
            if epoch <= config.nr_epoch_first_stage:
                return base_lr
            elif epoch <= config.nr_epoch_second_stage:
                return base_lr * 0.1
            else:
                return base_lr * 0.01

        for i in range(config.max_epoch):
            m.assign_lr(session, get_learning_rate(i, config))
            print("Epoch: %d Learning rate: %f"
                  % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(
                session, m, train_data, m.train_op, verbose=True)
            print("Epoch: %d Train Perplexity: %.3f"
                  % (i + 1, train_perplexity))
            valid_perplexity = run_epoch(
                session, mvalid, valid_data, tf.no_op())
            print("Epoch: %d Valid Perplexity: %.3f"
                  % (i + 1, valid_perplexity))
        test_perplexity = run_epoch(
            session, mtest, test_data, tf.no_op())
        print("Test Perplexity: %.3f" % test_perplexity)
Example #17
Source File: lstm.py From dlbench with MIT License | 4 votes |
def main(_):
    """Benchmark-oriented PTB training loop with CPU/GPU device selection.

    Validation/test evaluation is deliberately commented out — this
    variant only measures training.

    NOTE(review): source was collapsed onto one line; nesting below is
    reconstructed — confirm against the upstream dlbench repo.
    """
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")
    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data
    config = get_config()
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    # config.device is a GPU index as a string; '-1' means CPU.
    if config.device == '-1':
        tf_dev = '/cpu:0'
    else:
        tf_dev = '/gpu:' + config.device
    print(tf_dev)
    tconfig = tf.ConfigProto(allow_soft_placement=True)
    if tf_dev.find('cpu') >= 0:
        # cpu version: honor OMP_NUM_THREADS for intra-op parallelism.
        num_threads = os.getenv('OMP_NUM_THREADS', 1)
        tconfig = tf.ConfigProto(
            allow_soft_placement=True,
            intra_op_parallelism_threads=int(num_threads))
    with tf.Graph().as_default(), tf.device(tf_dev), \
            tf.Session(config=tconfig) as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None,
                               initializer=initializer):
            m = PTBModel(is_training=True, config=config)
        # with tf.variable_scope("model", reuse=True, initializer=initializer):
        #     mvalid = PTBModel(is_training=False, config=config)
        #     mtest = PTBModel(is_training=False, config=eval_config)
        tf.initialize_all_variables().run()
        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)
            print("Epoch: %d Learning rate: %.3f"
                  % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session, m, train_data, m.train_op,
                                         config.iters, verbose=True)
            # print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
            # valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
            # print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
            # test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
            # print("Test Perplexity: %.3f" % test_perplexity)
Example #18
Source File: lstm.py From dlbench with MIT License | 4 votes |
def main(_):
    """Benchmark PTB training that also records average batch time and a
    per-epoch perplexity summary, then evaluates on the test set.

    NOTE(review): source was collapsed onto one line; nesting below is
    reconstructed — confirm against the upstream dlbench repo.
    """
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")
    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data
    config = get_config()
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    # config.device is a GPU index as a string; '-1' means CPU.
    if config.device == '-1':
        tf_dev = '/cpu:0'
    else:
        tf_dev = '/gpu:' + config.device
    print(tf_dev)
    tconfig = tf.ConfigProto(allow_soft_placement=True)
    if tf_dev.find('cpu') >= 0:
        # cpu version: honor OMP_NUM_THREADS for intra-op parallelism.
        num_threads = os.getenv('OMP_NUM_THREADS', 1)
        tconfig = tf.ConfigProto(
            allow_soft_placement=True,
            intra_op_parallelism_threads=int(num_threads))
    with tf.Graph().as_default(), tf.device(tf_dev), \
            tf.Session(config=tconfig) as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None,
                               initializer=initializer):
            m = PTBModel(is_training=True, config=config)
        with tf.variable_scope("model", reuse=True,
                               initializer=initializer):
            #mvalid = PTBModel(is_training=False, config=config)
            mtest = PTBModel(is_training=False, config=eval_config)
        tf.global_variables_initializer().run()
        total_average_batch_time = 0.0
        epochs_info = []
        for i in range(config.max_max_epoch):
            # Fixed learning rate for benchmarking; decay left disabled.
            #lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            #m.assign_lr(session, config.learning_rate * lr_decay)
            m.assign_lr(session, config.learning_rate)
            print("Epoch: %d Learning rate: %.3f"
                  % (i + 1, session.run(m.lr)))
            train_perplexity, average_batch_time = run_epoch(
                session, m, train_data, m.train_op, verbose=True)
            total_average_batch_time += average_batch_time
            print("Epoch: %d Train Perplexity: %.3f"
                  % (i + 1, train_perplexity))
            # Record perplexity every other epoch for the summary line.
            if i % 2 == 0:
                epochs_info.append('%d:_:%.3f' % (i, train_perplexity))
            # valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
            # print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
        print("average_batch_time: %.6f"
              % (total_average_batch_time / int(config.max_max_epoch)))
        print('epoch_info:' + ','.join(epochs_info))
        test_perplexity, test_average_batch_time = run_epoch(
            session, mtest, test_data, tf.no_op())
        print("Test Perplexity: %.3f" % test_perplexity)
Example #19
Source File: ptb_word_lm.py From HumanRecognition with MIT License | 4 votes |
def main(_):
    """Train/validate/test the PTB word LM under a tf.train.Supervisor.

    NOTE(review): the source was collapsed onto a single line; nesting
    below is reconstructed to match the canonical TensorFlow PTB
    tutorial — confirm against upstream.
    """
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")
    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data
    config = get_config()
    eval_config = get_config()
    # One-token steps so evaluation threads state through exactly.
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.name_scope("Train"):
            train_input = PTBInput(config=config, data=train_data,
                                   name="TrainInput")
            with tf.variable_scope("Model", reuse=None,
                                   initializer=initializer):
                m = PTBModel(is_training=True, config=config,
                             input_=train_input)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Learning Rate", m.lr)
        with tf.name_scope("Valid"):
            valid_input = PTBInput(config=config, data=valid_data,
                                   name="ValidInput")
            # reuse=True shares weights with the training model.
            with tf.variable_scope("Model", reuse=True,
                                   initializer=initializer):
                mvalid = PTBModel(is_training=False, config=config,
                                  input_=valid_input)
            tf.summary.scalar("Validation Loss", mvalid.cost)
        with tf.name_scope("Test"):
            test_input = PTBInput(config=eval_config, data=test_data,
                                  name="TestInput")
            with tf.variable_scope("Model", reuse=True,
                                   initializer=initializer):
                mtest = PTBModel(is_training=False, config=eval_config,
                                 input_=test_input)
        sv = tf.train.Supervisor(logdir=FLAGS.save_path)
        with sv.managed_session() as session:
            for i in range(config.max_max_epoch):
                # Flat LR for max_epoch epochs, then exponential decay.
                lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch,
                                                  0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)
                print("Epoch: %d Learning rate: %.3f"
                      % (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                             verbose=True)
                print("Epoch: %d Train Perplexity: %.3f"
                      % (i + 1, train_perplexity))
                valid_perplexity = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f"
                      % (i + 1, valid_perplexity))
            test_perplexity = run_epoch(session, mtest)
            print("Test Perplexity: %.3f" % test_perplexity)
            if FLAGS.save_path:
                print("Saving model to %s." % FLAGS.save_path)
                sv.saver.save(session, FLAGS.save_path,
                              global_step=sv.global_step)
Example #20
Source File: ptb_word_lm.py From YellowFin with Apache License 2.0 | 4 votes |
def main(_):
    """YellowFin variant of the PTB trainer; uses the legacy
    tf.scalar_summary API.

    NOTE(review): the source was collapsed onto a single line; nesting
    below is reconstructed to match the canonical TensorFlow PTB
    tutorial — confirm against upstream.
    """
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")
    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data
    config = get_config()
    eval_config = get_config()
    # One-token steps for exact evaluation.
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.name_scope("Train"):
            train_input = PTBInput(config=config, data=train_data,
                                   name="TrainInput")
            with tf.variable_scope("Model", reuse=None,
                                   initializer=initializer):
                m = PTBModel(is_training=True, config=config,
                             input_=train_input)
            tf.scalar_summary("Training Loss", m.cost)
            tf.scalar_summary("Learning Rate", m.lr)
        with tf.name_scope("Valid"):
            valid_input = PTBInput(config=config, data=valid_data,
                                   name="ValidInput")
            # reuse=True shares weights with the training model.
            with tf.variable_scope("Model", reuse=True,
                                   initializer=initializer):
                mvalid = PTBModel(is_training=False, config=config,
                                  input_=valid_input)
            tf.scalar_summary("Validation Loss", mvalid.cost)
        with tf.name_scope("Test"):
            test_input = PTBInput(config=eval_config, data=test_data,
                                  name="TestInput")
            with tf.variable_scope("Model", reuse=True,
                                   initializer=initializer):
                mtest = PTBModel(is_training=False, config=eval_config,
                                 input_=test_input)
        sv = tf.train.Supervisor(logdir=FLAGS.save_path)
        with sv.managed_session() as session:
            # session = sv.managed_session()
            # with tf.Session() as session:
            for i in range(config.max_max_epoch):
                lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch,
                                                  0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)
                print("Epoch: %d Learning rate: %.3f"
                      % (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                             verbose=True)
                print("Epoch: %d Train Perplexity: %.3f"
                      % (i + 1, train_perplexity))
                valid_perplexity = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f"
                      % (i + 1, valid_perplexity))
            test_perplexity = run_epoch(session, mtest)
            print("Test Perplexity: %.3f" % test_perplexity)
            if FLAGS.save_path:
                print("Saving model to %s." % FLAGS.save_path)
                sv.saver.save(session, FLAGS.save_path,
                              global_step=sv.global_step)