Python tensorflow.string_join() Examples
The following are 30 code examples of tensorflow.string_join(), collected from open-source projects. tf.string_join(inputs, separator='', name=None) joins a list of string tensors element-wise into a single string tensor; the inputs must all share one shape, except that scalar inputs are broadcast against the rest. In TensorFlow 2.x the op was renamed to tf.strings.join, and the old name survives as tf.compat.v1.string_join. Each example below notes its source file, the project it comes from, and that project's license. You may also want to check out all available functions and classes of the module tensorflow.
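Before the project-specific examples, here is a minimal, self-contained sketch of the element-wise join and separator behavior, written against the TensorFlow 1.x graph API used throughout this page (the variable names are illustrative only):

import tensorflow as tf  # 1.x-style graph API, matching the examples below

# Scalar inputs join to a single scalar string.
single = tf.string_join(['data', '/', 'img_001.png'])  # -> b'data/img_001.png'

# Equal-shaped tensors join element-wise, with the separator between parts.
batch = tf.string_join([['a.png', 'b.png'], ['1', '2']], separator='_')
# -> [b'a.png_1', b'b.png_2']

with tf.Session() as sess:
    print(sess.run(single))
    print(sess.run(batch))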
Example #1
Source File: datasets.py From DDFlow with MIT License
def read_and_decode_distillation(self, filename_queue):
    img1_name = tf.string_join([self.img_dir, '/', filename_queue[0]])
    img2_name = tf.string_join([self.img_dir, '/', filename_queue[1]])
    img1 = tf.image.decode_png(tf.read_file(img1_name), channels=3)
    img1 = tf.cast(img1, tf.float32)
    img2 = tf.image.decode_png(tf.read_file(img2_name), channels=3)
    img2 = tf.cast(img2, tf.float32)

    flow_occ_fw_name = tf.string_join([self.fake_flow_occ_dir, '/flow_occ_fw_', filename_queue[2], '.png'])
    flow_occ_bw_name = tf.string_join([self.fake_flow_occ_dir, '/flow_occ_bw_', filename_queue[2], '.png'])
    flow_occ_fw = tf.image.decode_png(tf.read_file(flow_occ_fw_name), dtype=tf.uint16, channels=3)
    flow_occ_fw = tf.cast(flow_occ_fw, tf.float32)
    flow_occ_bw = tf.image.decode_png(tf.read_file(flow_occ_bw_name), dtype=tf.uint16, channels=3)
    flow_occ_bw = tf.cast(flow_occ_bw, tf.float32)
    flow_fw, occ_fw = self.extract_flow_and_mask(flow_occ_fw)
    flow_bw, occ_bw = self.extract_flow_and_mask(flow_occ_bw)
    return img1, img2, flow_fw, flow_bw, occ_fw, occ_bw
Example #2
Source File: string_join_op_test.py From deep_image_model with Apache License 2.0
def testStringJoin(self):
    input0 = ["a", "b"]
    input1 = "a"
    input2 = [["b"], ["c"]]

    with self.test_session():
        output = tf.string_join([input0, input1])
        self.assertAllEqual(output.eval(), [b"aa", b"ba"])

        output = tf.string_join([input0, input1], separator="--")
        self.assertAllEqual(output.eval(), [b"a--a", b"b--a"])

        output = tf.string_join([input0, input1, input0], separator="--")
        self.assertAllEqual(output.eval(), [b"a--a--a", b"b--a--b"])

        output = tf.string_join([input1] * 4, separator="!")
        self.assertEqual(output.eval(), b"a!a!a!a")

        output = tf.string_join([input2] * 2, separator="")
        self.assertAllEqual(output.eval(), [[b"bb"], [b"cc"]])

        with self.assertRaises(ValueError):  # Inconsistent shapes
            tf.string_join([input0, input2]).eval()
Example #3
Source File: datasets.py From DDFlow with MIT License
def read_and_decode_ppm(self, filename_queue):
    def read_ppm(self, filename):
        img = misc.imread(filename).astype('float32')
        return img

    flying_h = 384
    flying_w = 512
    img1_name = tf.string_join([self.img_dir, '/', filename_queue[0]])
    img2_name = tf.string_join([self.img_dir, '/', filename_queue[1]])
    img1 = tf.py_func(read_ppm, [img1_name], tf.float32)
    img2 = tf.py_func(read_ppm, [img2_name], tf.float32)
    img1 = tf.reshape(img1, [flying_h, flying_w, 3])
    img2 = tf.reshape(img2, [flying_h, flying_w, 3])
    return img1, img2
Example #4
Source File: Reader.py From PReMVOS with MIT License
def add_distance_transform(tensors, labels, distance_transform_fn):
    args_list = [tensors["unnormalized_img"], tensors["label"], tensors["raw_label"],
                 labels[Constants.STRATEGY], labels[Constants.IGNORE_CLASSES]]
    if "old_label" in tensors:
        args_list.append(tensors["old_label"])
    u0, u1, num_clicks = tf.py_func(distance_transform_fn, args_list,
                                    [tf.float32, tf.float32, tf.int64],
                                    name="create_distance_transform")
    u0 = tf.expand_dims(u0, axis=2)
    u0.set_shape(tensors["unnormalized_img"].get_shape().as_list()[:-1] + [1])
    u1 = tf.expand_dims(u1, axis=2)
    u1.set_shape(tensors["unnormalized_img"].get_shape().as_list()[:-1] + [1])
    shape = tensors["tag"].get_shape()
    im_path = tf.string_join([tensors["tag"], tf.as_string(num_clicks)], separator=":", name="JoinPath")
    im_path.set_shape(shape)
    tensors[Constants.DT_NEG] = u0
    tensors[Constants.DT_POS] = u1
    tensors["tag"] = im_path
    return tensors
Example #5
Source File: train.py From multilabel-image-classification-tensorflow with MIT License
def make_status_message(model):
    """Makes a string `Tensor` of training status."""
    return tf.string_join(
        [
            'Starting train step: current_image_id: ',
            tf.as_string(model.current_image_id), ', progress: ',
            tf.as_string(model.progress),
            ', num_blocks: {}'.format(model.num_blocks),
            ', batch_size: {}'.format(model.batch_size)
        ],
        name='status_message')
Example #6
Source File: model_utils.py From CipherGAN with MIT License
def log_text(F, G, params):
    lookup_table = construct_vocab_lookup_table(params.vocab)

    X_vocab = tf.expand_dims(tf.range(params.vocab_size), axis=0)
    if params.use_embeddings:
        X = embed_inputs(X_vocab, params, reuse=True)
    else:
        X = tf.one_hot(X_vocab, depth=params.vocab_size)
    X_map_distribution = F(X, params.F, params)
    X_map_indices = tf.argmax(X_map_distribution, axis=-1)
    # X_vocab = tf.Print(X_vocab, [X_vocab], message="X_vocab", summarize=10)
    # X_map_indices = tf.Print(
    #     X_map_indices, [X_map_indices], message="X_map_indices", summarize=10)
    X_map_text = lookup_table.lookup(tf.to_int64(X_map_indices))
    X_vocab_text = lookup_table.lookup(tf.to_int64(X_vocab))
    X_text = tf.string_join([X_vocab_text, "->", X_map_text])
    tf.summary.text("F_map", X_text)

    Y_vocab = tf.expand_dims(tf.range(params.vocab_size), axis=0)
    if params.use_embeddings:
        Y = embed_inputs(Y_vocab, params, reuse=True)
    else:
        Y = tf.one_hot(Y_vocab, depth=params.vocab_size)
    Y_map_distribution = G(Y, params.G, params)
    Y_map_indices = tf.argmax(Y_map_distribution, axis=-1)
    # Y_vocab = tf.Print(Y_vocab, [Y_vocab], message="Y_vocab", summarize=10)
    # Y_map_indices = tf.Print(
    #     Y_map_indices, [Y_map_indices], message="Y_map_indices", summarize=10)
    Y_map_text = lookup_table.lookup(tf.to_int64(Y_map_indices))
    Y_vocab_text = lookup_table.lookup(tf.to_int64(Y_vocab))
    Y_text = tf.string_join([Y_vocab_text, "->", Y_map_text])
    tf.summary.text("G_map", Y_text)
Example #7
Source File: tfrecords_to_bigtable.py From class-balanced-loss with MIT License
def build_row_key_dataset(num_records, row_prefix):
    if num_records is not None:
        ds = tf.data.Dataset.range(num_records)
    else:
        ds = tf.contrib.data.Counter()
    if num_records is None:
        width = 10
    else:
        width = pad_width(num_records)
    ds = ds.map(lambda idx: tf.as_string(idx, width=width, fill='0'))
    if row_prefix is not None:
        ds = ds.map(lambda idx: tf.string_join([row_prefix, idx]))
    return ds
Example #8
Source File: inputs.py From dcase2018_baseline with MIT License
def clip_to_waveform(clip, clip_dir=None):
    """Decodes a WAV clip into a waveform tensor."""
    # Decode the WAV-format clip into a waveform tensor where
    # the values lie in [-1, +1].
    clip_path = tf.string_join([clip_dir, clip], separator=os.sep)
    clip_data = tf.read_file(clip_path)
    waveform, sr = tf_audio.decode_wav(clip_data)
    # Assert that the clip has the expected sample rate.
    check_sr = tf.assert_equal(sr, SAMPLE_RATE)
    # and that it is mono.
    check_channels = tf.assert_equal(tf.shape(waveform)[1], 1)
    with tf.control_dependencies([tf.group(check_sr, check_channels)]):
        return tf.squeeze(waveform)
Example #9
Source File: datasets.py From DDFlow with MIT License
def read_and_decode(self, filename_queue):
    img1_name = tf.string_join([self.img_dir, '/', filename_queue[0]])
    img2_name = tf.string_join([self.img_dir, '/', filename_queue[1]])
    img1 = tf.image.decode_png(tf.read_file(img1_name), channels=3)
    img1 = tf.cast(img1, tf.float32)
    img2 = tf.image.decode_png(tf.read_file(img2_name), channels=3)
    img2 = tf.cast(img2, tf.float32)
    return img1, img2

# For Flying Chairs, the image type is ppm; please use "read_and_decode_ppm" instead of "read_and_decode".
# Similarly, for other image types, please write their decode functions by yourself.
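As the closing comment suggests, decoders for other image formats follow the same join, read, decode, cast pattern. Below is a minimal sketch of a hypothetical JPEG variant; it is not part of the DDFlow source and assumes the same self.img_dir and filename_queue layout as the reader above.

def read_and_decode_jpeg(self, filename_queue):
    # Hypothetical JPEG counterpart of read_and_decode above (an assumption,
    # not DDFlow code): join the root dir and file name, read, decode, cast.
    img1_name = tf.string_join([self.img_dir, '/', filename_queue[0]])
    img2_name = tf.string_join([self.img_dir, '/', filename_queue[1]])
    img1 = tf.cast(tf.image.decode_jpeg(tf.read_file(img1_name), channels=3), tf.float32)
    img2 = tf.cast(tf.image.decode_jpeg(tf.read_file(img2_name), channels=3), tf.float32)
    return img1, img2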
Example #10
Source File: bottles_of_bear.py From EsotericTensorFlow with MIT License
def body(self, i, text):
    before, after, before_uppercase = tf.cond(tf.equal(i, 0),
        lambda: ('no more bottles', '99 bottles', 'No more bottles'),
        lambda: tf.cond(tf.equal(i, 1),
            lambda: ('1 bottle', 'no more bottles', '1 bottle'),
            lambda: tf.cond(tf.equal(i, 2),
                lambda: ('2 bottles', '1 bottle', '2 bottles'),
                lambda: (tf.string_join([tf.as_string(i), ' bottles'], ''),
                         tf.string_join([tf.as_string(tf.subtract(i, 1)), ' bottles']),
                         tf.string_join([tf.as_string(i), ' bottles'], '')))))
    action = tf.cond(tf.equal(i, 0),
        lambda: tf.constant('Go to the store and buy some more'),
        lambda: tf.constant('Take one down and pass it around'))
    return tf.subtract(i, 1), tf.string_join([text, tf.string_join(
        [before_uppercase, ' of beer on the wall, ', before, ' of beer.\n',
         action, ', ', after, ' of beer on the wall.\n'])])
Example #11
Source File: brain_fuck.py From EsotericTensorFlow with MIT License
def body(self, pc, tape, cur, jumps, output):
    token = self.tokens[pc]
    inc_pc = tf.add(pc, 1)

    def stdin(c):
        # return tf.assign(self.tape[c], input(''))
        return self.tape

    return tf.cond(tf.equal(token, '+'),
        lambda: (inc_pc, tf.assign(self.tape[cur], self.tape[cur] + 1), cur, jumps, output),
        lambda: tf.cond(tf.equal(token, '-'),
            lambda: (inc_pc, tf.assign(self.tape[cur], self.tape[cur] - 1), cur, jumps, output),
            lambda: tf.cond(tf.equal(token, '>'),
                lambda: (inc_pc, tape, tf.add(cur, 1), jumps, output),
                lambda: tf.cond(tf.equal(token, '<'),
                    lambda: (inc_pc, tape, tf.subtract(cur, 1), jumps, output),
                    lambda: tf.cond(tf.equal(token, '.'),
                        lambda: (inc_pc, tape, cur, jumps,
                                 tf.string_join([output, ascii2char(tape[cur])])),
                        lambda: tf.cond(tf.equal(token, ','),
                            lambda: (inc_pc, stdin(cur), cur, jumps, output),
                            lambda: tf.cond(tf.equal(token, '['),
                                lambda: tf.cond(tf.equal(self.tape[cur], 0),
                                    lambda: (jumps[pc], tape, cur, jumps, output),
                                    lambda: (inc_pc, tape, cur, jumps, output)),
                                lambda: tf.cond(tf.equal(token, ']'),
                                    lambda: tf.cond(tf.not_equal(self.tape[cur], 0),
                                        lambda: (jumps[pc], tape, cur, jumps, output),
                                        lambda: (inc_pc, tape, cur, jumps, output)),
                                    lambda: (inc_pc, tape, cur, jumps, output)))))))))
Example #12
Source File: train.py From g-tensorflow-models with Apache License 2.0
def make_status_message(model):
    """Makes a string `Tensor` of training status."""
    return tf.string_join(
        [
            'Starting train step: current_image_id: ',
            tf.as_string(model.current_image_id), ', progress: ',
            tf.as_string(model.progress),
            ', num_blocks: {}'.format(model.num_blocks),
            ', batch_size: {}'.format(model.batch_size)
        ],
        name='status_message')
Example #13
Source File: dataset.py From spleeter with MIT License
def expand_path(self, sample):
    """ Expands audio paths for the given sample. """
    return dict(sample, **{
        f'{instrument}_path': tf.string_join(
            (self._audio_path, sample[f'{instrument}_path']), SEPARATOR)
        for instrument in self._instruments})
Example #14
Source File: caption_infer.py From unsupervised_captioning with MIT License
def read_image(im):
    """Reads an image."""
    filename = tf.string_join([FLAGS.data_dir, im])
    image = tf.read_file(filename)
    image = tf.image.decode_jpeg(image, 3)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize_images(image, [346, 346])
    image = image[23:-24, 23:-24]
    image = image * 2 - 1
    return image
Example #15
Source File: file_util.py From multi-label-classification with MIT License
def _parse_string_line(string_line, root_path):
    """ Parse one line of the text file into an image path (joined with the
    image root directory) and its labels.
    :param string_line: one line of text, formatted as "image_name label0 label1 label2 label3 ..."
    :param root_path: root directory of the images
    :return: DatasetV1Adapter<(image path Tensor(shape=(), dtype=string), label Tensor(shape=(?,), dtype=float32))>
    """
    strings = tf.string_split([string_line], delimiter=' ').values
    image_path = tf.string_join([root_path, strings[0]], separator=os.sep)
    labels = tf.string_to_number(strings[1:])
    return image_path, labels
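For context, a hedged sketch of wiring this parser into a tf.data pipeline; the file name train.txt and the root path are hypothetical.

# Hypothetical usage: every line of train.txt reads "image_name label0 label1 ...".
dataset = tf.data.TextLineDataset('train.txt').map(
    lambda line: _parse_string_line(line, '/data/images'))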
Example #16
Source File: datasets.py From SelFlow with MIT License
def read_and_decode(self, filename_queue):
    img0_name = tf.string_join([self.img_dir, '/', filename_queue[0]])
    img1_name = tf.string_join([self.img_dir, '/', filename_queue[1]])
    img2_name = tf.string_join([self.img_dir, '/', filename_queue[2]])

    img0 = tf.image.decode_png(tf.read_file(img0_name), channels=3)
    img0 = tf.cast(img0, tf.float32)
    img1 = tf.image.decode_png(tf.read_file(img1_name), channels=3)
    img1 = tf.cast(img1, tf.float32)
    img2 = tf.image.decode_png(tf.read_file(img2_name), channels=3)
    img2 = tf.cast(img2, tf.float32)
    return img0, img1, img2

# For Validation or Testing
Example #17
Source File: inputs.py From dcase2019_task2_baseline with MIT License
def clip_to_waveform(clip, clip_dir=None):
    """Decodes a WAV clip into a waveform tensor."""
    # Decode the WAV-format clip into a waveform tensor where
    # the values lie in [-1, +1].
    clip_path = tf.string_join([clip_dir, clip], separator=os.sep)
    clip_data = tf.read_file(clip_path)
    waveform, sr = tf_audio.decode_wav(clip_data)
    # waveform = tf.Print(waveform, [tf.shape(waveform), waveform], message='Waveform:', summarize=100)
    # Assert that the clip has the expected sample rate.
    check_sr = tf.assert_equal(sr, SAMPLE_RATE)
    # and that it is mono.
    check_channels = tf.assert_equal(tf.shape(waveform)[1], 1)
    with tf.control_dependencies([tf.group(check_sr, check_channels)]):
        return tf.squeeze(waveform)
Example #18
Source File: batch_sequences_with_states_test.py From deep_image_model with Apache License 2.0
def setUp(self):
    super(BatchSequencesWithStatesTest, self).setUp()
    self.value_length = 4
    self.batch_size = 2
    self.key = tf.string_join(["key_", tf.as_string(tf.cast(
        10000 * tf.random_uniform(()), tf.int32))])
    self.sequences = {"seq1": np.random.rand(self.value_length, 5),
                      "seq2": np.random.rand(self.value_length, 4, 2)}
    self.context = {"context1": [3, 4]}
    self.initial_states = {"state1": np.random.rand(6, 7),
                           "state2": np.random.rand(8)}
Example #19
Source File: sequence_queueing_state_saver_test.py From deep_image_model with Apache License 2.0
def testStateSaverScopeNames(self):
    batch_size = tf.constant(2)
    sqss_scope_name = "unique_scope_name_for_sqss"
    num_unroll = 2
    length = 3
    key = tf.string_join(["key_", tf.as_string(tf.cast(
        10000 * tf.random_uniform(()), tf.int32))])
    padded_length = 4
    sequences = {"seq1": np.random.rand(padded_length, 5),
                 "seq2": np.random.rand(padded_length, 4, 2)}
    context = {"context1": [3, 4]}
    initial_states = {"state1": np.random.rand(6, 7),
                      "state2": np.random.rand(8)}
    state_saver = tf.contrib.training.SequenceQueueingStateSaver(
        batch_size=batch_size,
        num_unroll=num_unroll,
        input_length=length,
        input_key=key,
        input_sequences=sequences,
        input_context=context,
        initial_states=initial_states,
        name=sqss_scope_name)
    prefetch_op = state_saver.prefetch_op
    next_batch = state_saver.next_batch
    self.assertTrue(state_saver.barrier.barrier_ref.name.startswith(
        "%s/" % sqss_scope_name))
    self.assertTrue(prefetch_op.name.startswith("%s/" % sqss_scope_name))
    self.assertTrue(next_batch.key.name.startswith("%s/" % sqss_scope_name))
Example #20
Source File: tfrecords_to_bigtable.py From tpu_models with Apache License 2.0
def build_row_key_dataset(num_records, row_prefix):
    if num_records is not None:
        ds = tf.data.Dataset.range(num_records)
    else:
        ds = tf.contrib.data.Counter()
    if num_records is None:
        width = 10
    else:
        width = pad_width(num_records)
    ds = ds.map(lambda idx: tf.as_string(idx, width=width, fill='0'))
    if row_prefix is not None:
        ds = ds.map(lambda idx: tf.string_join([row_prefix, idx]))
    return ds
Example #21
Source File: tfrecords_to_bigtable.py From training_results_v0.5 with Apache License 2.0
def build_row_key_dataset(num_records, row_prefix):
    if num_records is not None:
        ds = tf.data.Dataset.range(num_records)
    else:
        ds = tf.contrib.data.Counter()
    if num_records is None:
        width = 10
    else:
        width = pad_width(num_records)
    ds = ds.map(lambda idx: tf.as_string(idx, width=width, fill='0'))
    if row_prefix is not None:
        ds = ds.map(lambda idx: tf.string_join([row_prefix, idx]))
    return ds
Example #22
Source File: common.py From HyperGAN with MIT License
def __init__(self, config, batch_size, one_hot=False):
    self.lookup = None
    reader = tf.TextLineReader()
    filename_queue = tf.train.string_input_producer(["chargan.txt"])
    key, x = reader.read(filename_queue)
    vocabulary = self.get_vocabulary()
    table = tf.contrib.lookup.string_to_index_table_from_tensor(
        mapping=vocabulary, default_value=0)

    x = tf.string_join([x, tf.constant(" " * 64)])
    x = tf.substr(x, [0], [64])
    x = tf.string_split(x, delimiter='')
    x = tf.sparse_tensor_to_dense(x, default_value=' ')
    x = tf.reshape(x, [64])
    x = table.lookup(x)
    self.one_hot = one_hot
    if one_hot:
        x = tf.one_hot(x, len(vocabulary))
        x = tf.cast(x, dtype=tf.float32)
        x = tf.reshape(x, [1, int(x.get_shape()[0]), int(x.get_shape()[1]), 1])
    else:
        x = tf.cast(x, dtype=tf.float32)
        x -= len(vocabulary) / 2.0
        x /= len(vocabulary) / 2.0
        x = tf.reshape(x, [1, 1, 64, 1])

    num_preprocess_threads = 8
    x = tf.train.shuffle_batch(
        [x],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=5000,
        min_after_dequeue=500,
        enqueue_many=True)

    self.x = x
    self.table = table
Example #23
Source File: training_graph.py From deep_lip_reading with Apache License 2.0
def add_tb_summaries(self):
    from util.tb_util import add_gif_summary, colorize_image

    fps = 10
    timeline = False

    # ---------------- Add video summaries -------------------------------
    bs = int(self.visual_frontend.output.shape[0])
    b_id = 0
    non_pad_inds = tf.cast(tf.where(self.padding_mask[b_id] > 0)[:, 0], tf.int64)
    fr_in, to_in = non_pad_inds[0], non_pad_inds[-1] + 1  # For masking out input paddings
    add_gif_summary('1-video_input',
                    self.visual_frontend.input[b_id][fr_in:to_in],
                    fps=fps, timeline=timeline)
    if not config.test_aug_times:
        add_gif_summary('2-input_to_resnet',
                        self.visual_frontend.aug_out[b_id][fr_in:to_in],
                        fps=fps, timeline=timeline)
    else:  # Viz the different test augmentations
        add_gif_summary('2-input_to_resnet',
                        tf.concat([self.visual_frontend.aug_out[b_id][fr_in:to_in]
                                   for b_id in xrange(bs)], axis=2),
                        fps=fps, timeline=timeline)

    # ---------------- Add text summaries -------------------------------
    pred_strings_tf = self.char_table.lookup(tf.cast(self.preds, tf.int64))
    joined_pred = tf.string_join(tf.split(pred_strings_tf, pred_strings_tf.shape[1], 1))[:, 0]
    gt_strings_tf = self.char_table.lookup(tf.cast(self.y, tf.int64))
    joined_gt = tf.string_join(tf.split(gt_strings_tf, pred_strings_tf.shape[1], 1))[:, 0]
    joined_all = tf.string_join([joined_gt, joined_pred], ' --> ')
    tf.summary.text('Predictions', joined_all)

    # ---------------- Add image summaries -------------------------------
    all_atts = []
    for layer_name, alignment_history in self.alignment_history.items():
        for att_head_idx, attention_images in enumerate(alignment_history):
            all_atts.append(attention_images)
    avg_att = tf.exp(tf.reduce_mean(tf.log(all_atts), axis=0))
    # Permute and reshape (batch, t_dec, t_enc) --> (batch, t_enc, t_dec, 1)
    attention_img = tf.expand_dims(tf.transpose(avg_att, [0, 2, 1]), -1)
    attention_img *= 255  # Scale to range [0, 255]
    b_id = 0  # visualize only the first sample of the batch
    to_out = tf.where(self.preds[b_id] > 0)[-1][0] + 1  # To mask output paddings
    color_img = tf.map_fn(colorize_image, attention_img[:, fr_in:to_in, :to_out])
    tf.summary.image("3-enc_dec_attention", color_img)

    # ---------------- Add image with subs summaries -------------------------------
    # import ipdb; ipdb.set_trace()
    add_gif_summary('4-subs',
                    self.visual_frontend.input[b_id][fr_in:to_in],
                    fps=fps, timeline=timeline,
                    attention=attention_img[b_id][fr_in:to_in, :to_out, 0],
                    pred=joined_pred[b_id])
Example #24
Source File: train.py From g-tensorflow-models with Apache License 2.0
def main(_):
    # Create the log_dir if not exist.
    if not tf.gfile.Exists(FLAGS.train_log_dir):
        tf.gfile.MakeDirs(FLAGS.train_log_dir)

    # Shard the model to different parameter servers.
    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):

        # Create the input dataset.
        with tf.name_scope('inputs'):
            images, labels = data_provider.provide_data(
                FLAGS.image_file_patterns, FLAGS.batch_size, FLAGS.patch_size)

        # Define the model.
        with tf.name_scope('model'):
            model = _define_model(images, labels)

        # Add image summary.
        tfgan.eval.add_stargan_image_summaries(
            model,
            num_images=len(FLAGS.image_file_patterns) * FLAGS.batch_size,
            display_diffs=True)

        # Define the model loss.
        loss = tfgan.stargan_loss(model)

        # Define the train ops.
        with tf.name_scope('train_ops'):
            train_ops = _define_train_ops(model, loss)

        # Define the train steps.
        train_steps = _define_train_step()

        # Define a status message.
        status_message = tf.string_join(
            [
                'Starting train step: ',
                tf.as_string(tf.train.get_or_create_global_step())
            ],
            name='status_message')

        # Train the model.
        tfgan.gan_train(
            train_ops,
            FLAGS.train_log_dir,
            get_hooks_fn=tfgan.get_sequential_train_hooks(train_steps),
            hooks=[
                tf.train.StopAtStepHook(num_steps=FLAGS.max_number_of_steps),
                tf.train.LoggingTensorHook([status_message], every_n_iter=10)
            ],
            master=FLAGS.master,
            is_chief=FLAGS.task == 0)
Example #25
Source File: train.py From g-tensorflow-models with Apache License 2.0
def main(_):
    if not tf.gfile.Exists(FLAGS.train_log_dir):
        tf.gfile.MakeDirs(FLAGS.train_log_dir)

    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
        with tf.name_scope('inputs'):
            images_x, images_y = data_provider.provide_custom_data(
                [FLAGS.image_set_x_file_pattern, FLAGS.image_set_y_file_pattern],
                batch_size=FLAGS.batch_size,
                patch_size=FLAGS.patch_size)
            # Set batch size for summaries.
            images_x.set_shape([FLAGS.batch_size, None, None, None])
            images_y.set_shape([FLAGS.batch_size, None, None, None])

        # Define CycleGAN model.
        cyclegan_model = _define_model(images_x, images_y)

        # Define CycleGAN loss.
        cyclegan_loss = tfgan.cyclegan_loss(
            cyclegan_model,
            cycle_consistency_loss_weight=FLAGS.cycle_consistency_loss_weight,
            tensor_pool_fn=tfgan.features.tensor_pool)

        # Define CycleGAN train ops.
        train_ops = _define_train_ops(cyclegan_model, cyclegan_loss)

        # Training
        train_steps = tfgan.GANTrainSteps(1, 1)
        status_message = tf.string_join(
            [
                'Starting train step: ',
                tf.as_string(tf.train.get_or_create_global_step())
            ],
            name='status_message')
        if not FLAGS.max_number_of_steps:
            return
        tfgan.gan_train(
            train_ops,
            FLAGS.train_log_dir,
            get_hooks_fn=tfgan.get_sequential_train_hooks(train_steps),
            hooks=[
                tf.train.StopAtStepHook(num_steps=FLAGS.max_number_of_steps),
                tf.train.LoggingTensorHook([status_message], every_n_iter=10)
            ],
            master=FLAGS.master,
            is_chief=FLAGS.task == 0)
Example #26
Source File: greeter_summary.py From tensorboard-plugin-example with Apache License 2.0
def op(name, guest, display_name=None, description=None, collections=None):
    """Create a TensorFlow summary op to greet the given guest.

    Arguments:
      name: A name for this summary operation.
      guest: A rank-0 string `Tensor`.
      display_name: If set, will be used as the display name in
        TensorBoard. Defaults to `name`.
      description: A longform readable description of the summary data.
        Markdown is supported.
      collections: Which TensorFlow graph collections to add the summary
        op to. Defaults to `['summaries']`. Can usually be ignored.
    """
    # The `name` argument is used to generate the summary op node name.
    # That node name will also involve the TensorFlow name scope.
    # By having the display_name default to the name argument, we make
    # the TensorBoard display clearer.
    if display_name is None:
        display_name = name

    # We could pass additional metadata other than the PLUGIN_NAME within the
    # plugin data by using the content parameter, but we don't need any metadata
    # for this simple example.
    summary_metadata = tf.SummaryMetadata(
        display_name=display_name,
        summary_description=description,
        plugin_data=tf.SummaryMetadata.PluginData(plugin_name=PLUGIN_NAME))

    message = tf.string_join(['Hello, ', guest, '!'])

    # Return a summary op that is properly configured.
    return tf.summary.tensor_summary(
        name,
        message,
        summary_metadata=summary_metadata,
        collections=collections)
Example #27
Source File: train.py From Gun-Detector with Apache License 2.0
def main(_):
    if not tf.gfile.Exists(FLAGS.train_log_dir):
        tf.gfile.MakeDirs(FLAGS.train_log_dir)

    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
        with tf.name_scope('inputs'):
            images_x, images_y = data_provider.provide_custom_datasets(
                [FLAGS.image_set_x_file_pattern, FLAGS.image_set_y_file_pattern],
                batch_size=FLAGS.batch_size,
                patch_size=FLAGS.patch_size)

        # Define CycleGAN model.
        cyclegan_model = _define_model(images_x, images_y)

        # Define CycleGAN loss.
        cyclegan_loss = tfgan.cyclegan_loss(
            cyclegan_model,
            cycle_consistency_loss_weight=FLAGS.cycle_consistency_loss_weight,
            tensor_pool_fn=tfgan.features.tensor_pool)

        # Define CycleGAN train ops.
        train_ops = _define_train_ops(cyclegan_model, cyclegan_loss)

        # Training
        train_steps = tfgan.GANTrainSteps(1, 1)
        status_message = tf.string_join(
            [
                'Starting train step: ',
                tf.as_string(tf.train.get_or_create_global_step())
            ],
            name='status_message')
        if not FLAGS.max_number_of_steps:
            return
        tfgan.gan_train(
            train_ops,
            FLAGS.train_log_dir,
            get_hooks_fn=tfgan.get_sequential_train_hooks(train_steps),
            hooks=[
                tf.train.StopAtStepHook(num_steps=FLAGS.max_number_of_steps),
                tf.train.LoggingTensorHook([status_message], every_n_iter=10)
            ],
            master=FLAGS.master,
            is_chief=FLAGS.task == 0)
Example #28
Source File: _ds_csv.py From tensorfx with Apache License 2.0
def parse_csv(schema, instances, prediction):
    """A wrapper around decode_csv that parses csv instances based on provided Schema information.
    """
    if prediction:
        # For training and evaluation data, the expectation is the target column is always present.
        # For prediction however, the target may or may not be present.
        # - In true prediction use-cases, the target is unknown and never present.
        # - In prediction for model evaluation use-cases, the target is present.
        # To use a single prediction graph, the missing target needs to be detected by comparing
        # number of columns in instances with number of columns defined in the schema. If there are
        # fewer columns, then prepend a ',' (with assumption that target is always the first column).
        #
        # To get the number of columns in instances, split on the ',' on the first instance, and use
        # the first dimension of the shape of the resulting substring values.
        columns = tf.shape(tf.string_split([instances[0]], delimiter=',').values)[0]
        instances = tf.cond(tf.less(columns, len(schema)),
                            lambda: tf.string_join([tf.constant(','), instances]),
                            lambda: instances)

    # Convert the schema into a set of tensor defaults, to be used for parsing csv data.
    defaults = []
    for field in schema:
        if field.length != 1:
            # TODO: Support variable length, and list columns in csv.
            raise ValueError('Unsupported schema field "%s". Length must be 1.' % field.name)

        if field.type == SchemaFieldType.integer:
            field_default = tf.constant(0, dtype=tf.int64)
        elif field.type == SchemaFieldType.real:
            field_default = tf.constant(0.0, dtype=tf.float32)
        else:
            # discrete, text, binary
            field_default = tf.constant('', dtype=tf.string)
        defaults.append([field_default])

    values = tf.decode_csv(instances, defaults, name='csv')

    parsed_instances = {}
    for field, value in zip(schema, values):
        # The parsed values are scalars, so each tensor is of shape (None,); turn them into tensors
        # of shape (None, 1).
        parsed_instances[field.name] = tf.expand_dims(value, axis=1, name=field.name)

    return parsed_instances
Example #29
Source File: train.py From multilabel-image-classification-tensorflow with MIT License
def main(_):
    # Create the log_dir if not exist.
    if not tf.gfile.Exists(FLAGS.train_log_dir):
        tf.gfile.MakeDirs(FLAGS.train_log_dir)

    # Shard the model to different parameter servers.
    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):

        # Create the input dataset.
        with tf.name_scope('inputs'):
            images, labels = data_provider.provide_data(
                FLAGS.image_file_patterns, FLAGS.batch_size, FLAGS.patch_size)

        # Define the model.
        with tf.name_scope('model'):
            model = _define_model(images, labels)

        # Add image summary.
        tfgan.eval.add_stargan_image_summaries(
            model,
            num_images=len(FLAGS.image_file_patterns) * FLAGS.batch_size,
            display_diffs=True)

        # Define the model loss.
        loss = tfgan.stargan_loss(model)

        # Define the train ops.
        with tf.name_scope('train_ops'):
            train_ops = _define_train_ops(model, loss)

        # Define the train steps.
        train_steps = _define_train_step()

        # Define a status message.
        status_message = tf.string_join(
            [
                'Starting train step: ',
                tf.as_string(tf.train.get_or_create_global_step())
            ],
            name='status_message')

        # Train the model.
        tfgan.gan_train(
            train_ops,
            FLAGS.train_log_dir,
            get_hooks_fn=tfgan.get_sequential_train_hooks(train_steps),
            hooks=[
                tf.train.StopAtStepHook(num_steps=FLAGS.max_number_of_steps),
                tf.train.LoggingTensorHook([status_message], every_n_iter=10)
            ],
            master=FLAGS.master,
            is_chief=FLAGS.task == 0)
Example #30
Source File: train.py From multilabel-image-classification-tensorflow with MIT License
def main(_):
    if not tf.gfile.Exists(FLAGS.train_log_dir):
        tf.gfile.MakeDirs(FLAGS.train_log_dir)

    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
        with tf.name_scope('inputs'):
            images_x, images_y = data_provider.provide_custom_data(
                [FLAGS.image_set_x_file_pattern, FLAGS.image_set_y_file_pattern],
                batch_size=FLAGS.batch_size,
                patch_size=FLAGS.patch_size)
            # Set batch size for summaries.
            images_x.set_shape([FLAGS.batch_size, None, None, None])
            images_y.set_shape([FLAGS.batch_size, None, None, None])

        # Define CycleGAN model.
        cyclegan_model = _define_model(images_x, images_y)

        # Define CycleGAN loss.
        cyclegan_loss = tfgan.cyclegan_loss(
            cyclegan_model,
            cycle_consistency_loss_weight=FLAGS.cycle_consistency_loss_weight,
            tensor_pool_fn=tfgan.features.tensor_pool)

        # Define CycleGAN train ops.
        train_ops = _define_train_ops(cyclegan_model, cyclegan_loss)

        # Training
        train_steps = tfgan.GANTrainSteps(1, 1)
        status_message = tf.string_join(
            [
                'Starting train step: ',
                tf.as_string(tf.train.get_or_create_global_step())
            ],
            name='status_message')
        if not FLAGS.max_number_of_steps:
            return
        tfgan.gan_train(
            train_ops,
            FLAGS.train_log_dir,
            get_hooks_fn=tfgan.get_sequential_train_hooks(train_steps),
            hooks=[
                tf.train.StopAtStepHook(num_steps=FLAGS.max_number_of_steps),
                tf.train.LoggingTensorHook([status_message], every_n_iter=10)
            ],
            master=FLAGS.master,
            is_chief=FLAGS.task == 0)