Python tensorflow.reverse() Examples
The following are 30 code examples of tensorflow.reverse(), drawn from open-source projects. The original project and source file are noted above each example.
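Before the examples, a minimal sketch of what tf.reverse does in the integer-axis form used by TensorFlow 1.0 and later (the tensor values below are illustrative):

import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
tf.reverse(x, axis=[0])   # [[4, 5, 6], [1, 2, 3]]  -- flip rows
tf.reverse(x, axis=[1])   # [[3, 2, 1], [6, 5, 4]]  -- flip columns
tf.reverse(x, axis=[-1])  # same as axis=[1] for a rank-2 tensor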
Example #1
Source File: vgg16.py From SSH-TensorFlow with MIT License
def fix_variables(self, sess, pretrained_model):
    print('Fix VGG16 layers..')
    with tf.variable_scope('Fix_VGG16') as scope:
        with tf.device("/cpu:0"):
            # fix the vgg16 issue from conv weights to fc weights
            # fix RGB to BGR
            fc6_conv = tf.get_variable("fc6_conv", [7, 7, 512, 4096], trainable=False)
            fc7_conv = tf.get_variable("fc7_conv", [1, 1, 4096, 4096], trainable=False)
            conv1_rgb = tf.get_variable("conv1_rgb", [3, 3, 3, 64], trainable=False)
            restorer_fc = tf.train.Saver({self._scope + "/fc6/weights": fc6_conv,
                                          self._scope + "/fc7/weights": fc7_conv,
                                          self._scope + "/conv1/conv1_1/weights": conv1_rgb})
            restorer_fc.restore(sess, pretrained_model)

            # print("_variables_to_fix:", self._variables_to_fix)
            # sess.run(tf.assign(self._variables_to_fix[self._scope + '/fc6/weights:0'],
            #                    tf.reshape(fc6_conv,
            #                               self._variables_to_fix[self._scope + '/fc6/weights:0'].get_shape())))
            # sess.run(tf.assign(self._variables_to_fix[self._scope + '/fc7/weights:0'],
            #                    tf.reshape(fc7_conv,
            #                               self._variables_to_fix[self._scope + '/fc7/weights:0'].get_shape())))
            sess.run(tf.assign(self._variables_to_fix[self._scope + '/conv1/conv1_1/weights:0'],
                               tf.reverse(conv1_rgb, [2])))
Example #2
Source File: vgg16.py From RGB-N with MIT License
def fix_variables(self, sess, pretrained_model):
    print('Fix VGG16 layers..')
    with tf.variable_scope('Fix_VGG16') as scope:
        with tf.device("/cpu:0"):
            # fix the vgg16 issue from conv weights to fc weights
            # fix RGB to BGR
            fc6_conv = tf.get_variable("fc6_conv", [7, 7, 512, 4096], trainable=False)
            fc7_conv = tf.get_variable("fc7_conv", [1, 1, 4096, 4096], trainable=False)
            conv1_rgb = tf.get_variable("conv1_rgb", [3, 3, 3, 64], trainable=False)
            restorer_fc = tf.train.Saver({"vgg_16/fc6/weights": fc6_conv,
                                          "vgg_16/fc7/weights": fc7_conv,
                                          "vgg_16/conv1/conv1_1/weights": conv1_rgb})
            restorer_fc.restore(sess, pretrained_model)

            sess.run(tf.assign(self._variables_to_fix['vgg_16/fc6/weights:0'],
                               tf.reshape(fc6_conv,
                                          self._variables_to_fix['vgg_16/fc6/weights:0'].get_shape())))
            sess.run(tf.assign(self._variables_to_fix['vgg_16/fc7/weights:0'],
                               tf.reshape(fc7_conv,
                                          self._variables_to_fix['vgg_16/fc7/weights:0'].get_shape())))
            sess.run(tf.assign(self._variables_to_fix['vgg_16/conv1/conv1_1/weights:0'],
                               tf.reverse(conv1_rgb, [False, False, True, False])))
            # sess.run(tf.assign(self._variables_to_fix['vgg_16/conv1/conv1_1/weights:0'],
            #                    tf.reverse(conv1_rgb, [2])))
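A note on the two tf.reverse call styles that appear throughout these examples: pre-1.0 TensorFlow took a boolean per-dimension vector, while TensorFlow 1.0 and later takes a list of integer axis indices. Both calls below are meant to express the same channel flip on a [3, 3, 3, 64] conv kernel; which one is accepted depends on the installed TF version (this is an illustrative sketch, not part of the example above):

# TensorFlow >= 1.0: integer axis indices
bgr_kernel = tf.reverse(conv1_rgb, [2])
# Old pre-1.0 TensorFlow: boolean per-dimension flags
bgr_kernel = tf.reverse(conv1_rgb, [False, False, True, False])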
Example #3
Source File: style_transfer_realtime.py From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def VGG(x, para):
    x = tf.reverse(x, [-1]) - np.array([103.939, 116.779, 123.68])
    conv1_1 = conv_layer_relu(x, para['conv1_1'][0], para['conv1_1'][1])
    conv1_2 = conv_layer_relu(conv1_1, para['conv1_2'][0], para['conv1_2'][1])
    conv1_2_ave = ave_pool(conv1_2)
    conv2_1 = conv_layer_relu(conv1_2_ave, para['conv2_1'][0], para['conv2_1'][1])
    conv2_2 = conv_layer_relu(conv2_1, para['conv2_2'][0], para['conv2_2'][1])
    conv2_2_ave = ave_pool(conv2_2)
    conv3_1 = conv_layer_relu(conv2_2_ave, para['conv3_1'][0], para['conv3_1'][1])
    conv3_2 = conv_layer_relu(conv3_1, para['conv3_2'][0], para['conv3_2'][1])
    conv3_3 = conv_layer_relu(conv3_2, para['conv3_3'][0], para['conv3_3'][1])
    conv3_3_ave = ave_pool(conv3_3)
    conv4_1 = conv_layer_relu(conv3_3_ave, para['conv4_1'][0], para['conv4_1'][1])
    conv4_2 = conv_layer_relu(conv4_1, para['conv4_2'][0], para['conv4_2'][1])
    conv4_3 = conv_layer_relu(conv4_2, para['conv4_3'][0], para['conv4_3'][1])
    f = {}
    f["conv1_2"] = conv1_2
    f["conv2_2"] = conv2_2
    f["conv3_3"] = conv3_3
    f["conv4_3"] = conv4_3
    return f
Example #4
Source File: stylize.py From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def VGG(x, para):
    x = tf.reverse(x, [-1]) - np.array([103.939, 116.779, 123.68])
    conv1_1 = conv_layer_relu(x, para['conv1_1'][0], para['conv1_1'][1])
    conv1_2 = conv_layer_relu(conv1_1, para['conv1_2'][0], para['conv1_2'][1])
    conv1_2_ave = ave_pool(conv1_2)
    conv2_1 = conv_layer_relu(conv1_2_ave, para['conv2_1'][0], para['conv2_1'][1])
    conv2_2 = conv_layer_relu(conv2_1, para['conv2_2'][0], para['conv2_2'][1])
    conv2_2_ave = ave_pool(conv2_2)
    conv3_1 = conv_layer_relu(conv2_2_ave, para['conv3_1'][0], para['conv3_1'][1])
    conv3_2 = conv_layer_relu(conv3_1, para['conv3_2'][0], para['conv3_2'][1])
    conv3_3 = conv_layer_relu(conv3_2, para['conv3_3'][0], para['conv3_3'][1])
    conv3_3_ave = ave_pool(conv3_3)
    conv4_1 = conv_layer_relu(conv3_3_ave, para['conv4_1'][0], para['conv4_1'][1])
    conv4_2 = conv_layer_relu(conv4_1, para['conv4_2'][0], para['conv4_2'][1])
    conv4_3 = conv_layer_relu(conv4_2, para['conv4_3'][0], para['conv4_3'][1])
    f = {}
    f["conv1_2"] = conv1_2
    f["conv2_2"] = conv2_2
    f["conv3_3"] = conv3_3
    f["conv4_3"] = conv4_3
    return f
Example #5
Source File: ppo.py From fine-lm with MIT License
def calculate_generalized_advantage_estimator(
        reward, value, done, gae_gamma, gae_lambda):
    """Generalized advantage estimator."""
    # Below is slight weirdness, we set the last reward to 0.
    # This makes the advantage to be 0 in the last timestep
    reward = tf.concat([reward[:-1, :], value[-1:, :]], axis=0)
    next_value = tf.concat([value[1:, :], tf.zeros_like(value[-1:, :])], axis=0)
    next_not_done = 1 - tf.cast(
        tf.concat([done[1:, :], tf.zeros_like(done[-1:, :])], axis=0), tf.float32)
    delta = reward + gae_gamma * next_value * next_not_done - value

    return_ = tf.reverse(tf.scan(
        lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
        [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
        tf.zeros_like(delta[0, :]),
        parallel_iterations=1), [0])
    return tf.check_numerics(return_, "return")
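The double-reverse around tf.scan in this example is the standard way to run a right-to-left recurrence with a left-to-right scan: reverse the inputs, accumulate forward, then reverse the result. A minimal sketch of the same trick for a plain discounted cumulative sum (function and variable names here are illustrative):

import tensorflow as tf

def discounted_cumsum(rewards, gamma):
    # Computes out[t] = rewards[t] + gamma * out[t + 1], a backward
    # recurrence, by reversing, scanning forward, and reversing again.
    rev = tf.reverse(rewards, [0])
    acc = tf.scan(lambda agg, r: r + gamma * agg, rev,
                  initializer=tf.zeros_like(rewards[0]))
    return tf.reverse(acc, [0])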
Example #6
Source File: augmentation.py From ffn with Apache License 2.0
def reflection(data, decision):
    """Conditionally reflects the data in XYZ.

    Args:
        data: input tensor, shape: [..], z, y, x, c.
        decision: boolean tensor, shape 3, indicating on which spatial
            dimensions to apply the reflection (x, y, z).

    Returns:
        TF op to conditionally apply reflection.
    """
    with tf.name_scope('augment_reflection'):
        rank = data.get_shape().ndims
        spatial_dims = tf.constant([rank - 2, rank - 3, rank - 4])
        selected_dims = tf.boolean_mask(spatial_dims, decision)
        return tf.reverse(data, selected_dims)
Example #7
Source File: rnn.py From inferbeddings with MIT License
def bw_dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                   dtype=None, parallel_iterations=None, swap_memory=False,
                   time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!

    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    flat_inputs = tf.reverse(flat_inputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_inputs, sequence_length, 1)
    flat_outputs, final_state = tf.nn.dynamic_rnn(
        cell, flat_inputs, sequence_length=flat_len,
        initial_state=initial_state, dtype=dtype,
        parallel_iterations=parallel_iterations, swap_memory=swap_memory,
        time_major=time_major, scope=scope)
    flat_outputs = tf.reverse(flat_outputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_outputs, sequence_length, 1)

    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state
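The branch on sequence_length matters because tf.reverse flips the whole time axis, padding included, while tf.reverse_sequence reverses only the first sequence_length[i] steps of each row and leaves the padding in place. A small illustrative sketch (the values are made up):

x = tf.constant([[1, 2, 3, 0, 0],
                 [4, 5, 0, 0, 0]])
lengths = tf.constant([3, 2])
tf.reverse(x, [1])
# [[0, 0, 3, 2, 1], [0, 0, 0, 5, 4]]  -- padding moves to the front
tf.reverse_sequence(x, lengths, seq_axis=1, batch_axis=0)
# [[3, 2, 1, 0, 0], [5, 4, 0, 0, 0]]  -- padding stays at the end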
Example #8
Source File: rewards.py From Counterfactual-StoryRW with MIT License
def _discount_reward_tensor_1d(reward, sequence_length, discount=1., dtype=None):
    if sequence_length is None:
        raise ValueError('sequence_length must not be `None` for 1D reward.')

    batch_size = tf.shape(reward)[0]
    max_seq_length = tf.reduce_max(sequence_length)
    dtype = dtype or reward.dtype

    if discount == 1.:
        dmat = tf.ones(tf.concat([[batch_size], [max_seq_length]], 0), dtype=dtype)
    else:
        mask = tf.sequence_mask(sequence_length, dtype=dtype)
        mask = tf.concat([mask[:, 1:], tf.zeros_like(mask[:, -1:])], axis=1)
        # Make each row = [discount, ..., discount, 1, ..., 1]
        dmat = mask * discount + (1 - mask)
        dmat = tf.cumprod(dmat, axis=1, reverse=True)

    disc_reward = dmat * tf.expand_dims(reward, -1)
    disc_reward = mask_sequences(disc_reward, sequence_length, dtype=dtype, tensor_rank=2)

    return disc_reward
Example #9
Source File: rewards.py From Counterfactual-StoryRW with MIT License
def _discount_reward_tensor_2d(reward, sequence_length=None, discount=1., dtype=None):
    if sequence_length is not None:
        reward = mask_sequences(reward, sequence_length, dtype=dtype, tensor_rank=2)

    if discount == 1.:
        disc_reward = tf.cumsum(reward, axis=1, reverse=True)
    else:
        # [max_time, batch_size]
        rev_reward_T = tf.transpose(tf.reverse(reward, [1]), [1, 0])
        rev_reward_T_cum = tf.scan(
            fn=lambda acc, cur: cur + discount * acc,
            elems=rev_reward_T,
            initializer=tf.zeros_like(reward[:, 1]),
            back_prop=False)
        disc_reward = tf.reverse(tf.transpose(rev_reward_T_cum, [1, 0]), [1])

    return disc_reward
Example #10
Source File: vgg16.py From densecap-tensorflow with MIT License
def fix_variables(self, sess, pretrained_model):
    print('Fix VGG16 layers..')
    with tf.variable_scope('Fix_VGG16') as scope:
        with tf.device("/cpu:0"):
            # fix the vgg16 issue from conv weights to fc weights
            # fix RGB to BGR
            fc6_conv = tf.get_variable("fc6_conv", [7, 7, 512, 4096], trainable=False)
            fc7_conv = tf.get_variable("fc7_conv", [1, 1, 4096, 4096], trainable=False)
            conv1_rgb = tf.get_variable("conv1_rgb", [3, 3, 3, 64], trainable=False)
            restorer_fc = tf.train.Saver({self._vgg_scope + "/fc6/weights": fc6_conv,
                                          self._vgg_scope + "/fc7/weights": fc7_conv,
                                          self._vgg_scope + "/conv1/conv1_1/weights": conv1_rgb})
            restorer_fc.restore(sess, pretrained_model)

            sess.run(tf.assign(self._variables_to_fix[self._vgg_scope + '/fc6/weights:0'],
                               tf.reshape(fc6_conv,
                                          self._variables_to_fix[self._vgg_scope + '/fc6/weights:0'].get_shape())))
            sess.run(tf.assign(self._variables_to_fix[self._vgg_scope + '/fc7/weights:0'],
                               tf.reshape(fc7_conv,
                                          self._variables_to_fix[self._vgg_scope + '/fc7/weights:0'].get_shape())))
            sess.run(tf.assign(self._variables_to_fix[self._vgg_scope + '/conv1/conv1_1/weights:0'],
                               tf.reverse(conv1_rgb, [2])))
Example #11
Source File: models.py From miccai-2016-surgical-activity-rec with Apache License 2.0
def _compute_rnn_outputs(self):
    reversed_inputs = tf.reverse(self.inputs, [False, True, False])
    reversed_resets = tf.reverse(self.resets, [False, True, False])
    with tf.variable_scope('fw'):
        self._fw_lstm = LSTM(self.inputs, self.resets, self.training,
                             self.num_layers, self.hidden_layer_size,
                             self.init_scale, self.dropout_keep_prob)
    with tf.variable_scope('rv'):
        self._rv_lstm = LSTM(reversed_inputs, reversed_resets, self.training,
                             self.num_layers, self.hidden_layer_size,
                             self.init_scale, self.dropout_keep_prob)

    fw_outputs = self._fw_lstm.outputs
    rv_outputs = tf.reverse(self._rv_lstm.outputs, [False, True, False])
    outputs = tf.concat(2, [fw_outputs, rv_outputs])
    return outputs
Example #12
Source File: augmentation.py From ffn with Apache License 2.0
def __call__(self, x):
    """Applies the sampled permutation and reflection to `x`.

    Args:
        x: A Tensor of rank `self.rank`.

    Returns:
        The transformed Tensor, retaining as much static shape information
        as possible.
    """
    x = tf.convert_to_tensor(x)
    with tf.name_scope('permute_and_reflect'):
        if self.permutable_axes.size > 0:
            x = permute_axes(x, self.full_permutation, self.permutable_axes)
        if self.reflectable_axes.size > 0:
            x = tf.reverse(x, self.reflected_axes)
    return x
Example #13
Source File: augmentation.py From basenji with Apache License 2.0
def augment_stochastic(data_ops, augment_rc=False, augment_shifts=[]):
    """Apply stochastic augmentations.

    Args:
        data_ops: dict with keys 'sequence,' 'label,' and 'na.'
        augment_rc: Boolean for whether to apply reverse complement augmentation.
        augment_shifts: list of int offsets to sample shift augmentations.

    Returns:
        data_ops_aug: augmented data
    """
    if augment_shifts:
        data_ops['sequence'] = augment_stochastic_shifts(data_ops['sequence'],
                                                         augment_shifts)

    if augment_rc:
        data_ops = augment_stochastic_rc(data_ops)
    else:
        data_ops['reverse_preds'] = tf.zeros((), dtype=tf.bool)

    return data_ops
Example #14
Source File: ops.py From basenji with Apache License 2.0
def reverse_complement(input_seq, lengths=None):
    # TODO(dbelanger) remove dependencies on this method,
    # as it is easy to mis-use in ways that lead to buggy results.
    """Reverse complement a list of one hot coded nucleotide Tensors.

    Args:
        input_seq: Sequence of seq_len tensors of dimension (batch_size, 4)
        lengths: A `Tensor` of dimension batch_size, containing lengths for
            each sequence in the batch. If "None" is specified, simply
            reverse complements the list.

    Returns:
        reverse complemented sequence
    """
    if lengths is not None:
        print("Not yet implemented", file=sys.stderr)
        exit(1)
    else:
        nt_rc = tf.constant(
            [[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]],
            dtype="float32")
        return [tf.matmul(ris, nt_rc) for ris in reversed(input_seq)]
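With the usual ACGT one-hot ordering, complementing a base is itself a reversal of the channel axis (A maps to T, C maps to G), so the anti-diagonal matmul above can equivalently be written with tf.reverse alone. A sketch assuming a [seq_len, 4] one-hot tensor in ACGT order (function name is illustrative):

def reverse_complement_onehot(seq_1hot):
    # Reversing axis 0 reverses the sequence; reversing axis 1 maps
    # index i to 3 - i, i.e. A<->T and C<->G under ACGT channel order.
    return tf.reverse(seq_1hot, axis=[0, 1])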
Example #15
Source File: rnn.py From BERT with Apache License 2.0
def bw_dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                   dtype=None, parallel_iterations=None, swap_memory=False,
                   time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!

    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    flat_inputs = tf.reverse(flat_inputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_inputs, sequence_length, 1)
    flat_outputs, final_state = _dynamic_rnn(
        cell, flat_inputs, sequence_length=flat_len,
        initial_state=initial_state, dtype=dtype,
        parallel_iterations=parallel_iterations, swap_memory=swap_memory,
        time_major=time_major, scope=scope)
    flat_outputs = tf.reverse(flat_outputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_outputs, sequence_length, 1)

    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state
Example #16
Source File: ppo.py From BERT with Apache License 2.0
def calculate_generalized_advantage_estimator(
        reward, value, done, gae_gamma, gae_lambda):
    # pylint: disable=g-doc-args
    """Generalized advantage estimator.

    Returns:
        GAE estimator. It will be one element shorter than the input; this is
        because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
    """
    # pylint: enable=g-doc-args

    next_value = value[1:, :]
    next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
    delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
             - value[:-1, :])

    return_ = tf.reverse(tf.scan(
        lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
        [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
        tf.zeros_like(delta[0, :]),
        parallel_iterations=1), [0])
    return tf.check_numerics(return_, "return")
Example #17
Source File: arg_max.py From onnx-tensorflow with Apache License 2.0
def _common(cls, node, **kwargs):
    axis = node.attrs.get("axis", 0)
    keepdims = node.attrs.get("keepdims", 1)
    select_last_index = node.attrs.get("select_last_index", 0)
    if select_last_index == 0:
        arg_max = cls.make_tensor_from_onnx_node(node, **kwargs)
    else:
        # reverse the input and apply argmax on that to get the last
        # occurrence of the max
        x = kwargs["tensor_dict"][node.inputs[0]]
        x = tf.reverse(x, axis=[axis])
        arg_max = cls.make_tensor_from_onnx_node(node, inputs=[x], **kwargs)
        # adjust indices to account for the reverse
        arg_max = tf_shape(x)[axis] - arg_max - 1
    if keepdims == 1:
        return [tf.expand_dims(arg_max, axis=axis)]
    return [arg_max]
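The index adjustment follows from a simple identity: if j is the first occurrence of the max in the reversed tensor, then n - 1 - j is the last occurrence in the original. The same idea in plain NumPy (values are illustrative):

import numpy as np

x = np.array([1, 3, 2, 3, 0])
n = x.shape[0]
first = np.argmax(x)               # 1: first index of the max
last = n - 1 - np.argmax(x[::-1])  # 3: last index of the max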
Example #18
Source File: arg_min.py From onnx-tensorflow with Apache License 2.0
def _common(cls, node, **kwargs):
    axis = node.attrs.get("axis", 0)
    keepdims = node.attrs.get("keepdims", 1)
    select_last_index = node.attrs.get("select_last_index", 0)
    if select_last_index == 0:
        arg_min = cls.make_tensor_from_onnx_node(node, **kwargs)
    else:
        # reverse the input and apply argmin on that to get the last
        # occurrence of the min
        x = kwargs["tensor_dict"][node.inputs[0]]
        x = tf.reverse(x, axis=[axis])
        arg_min = cls.make_tensor_from_onnx_node(node, inputs=[x], **kwargs)
        # adjust indices to account for the reverse
        arg_min = tf_shape(x)[axis] - arg_min - 1
    if keepdims == 1:
        return [tf.expand_dims(arg_min, axis=axis)]
    return [arg_min]
Example #19
Source File: layers.py From face_landmark_dnn with MIT License
def LandmarkImageLayer(Landmarks):

    def draw_landmarks(L):

        def draw_landmarks_helper(Point):
            intLandmark = tf.to_int32(Point)
            locations = Offsets + intLandmark
            dxdy = Point - tf.to_float(intLandmark)
            offsetsSubPix = tf.to_float(Offsets) - dxdy
            vals = 1 / (1 + tf.norm(offsetsSubPix, axis=2))
            img = tf.scatter_nd(locations, vals, shape=(IMGSIZE, IMGSIZE))
            return img

        Landmark = tf.reverse(tf.reshape(L, [-1, 2]), [-1])
        # Landmark = tf.reshape(L, (-1, 2))
        Landmark = tf.clip_by_value(Landmark, HalfSize, IMGSIZE - 1 - HalfSize)
        # Ret = 1 / (tf.norm(tf.map_fn(DoIn, Landmarks), axis=3) + 1)
        Ret = tf.map_fn(draw_landmarks_helper, Landmark)
        Ret = tf.reshape(tf.reduce_max(Ret, axis=0), [IMGSIZE, IMGSIZE, 1])
        return Ret

    return tf.map_fn(draw_landmarks, Landmarks)
Example #20
Source File: resnet_v1.py From RGB-N with MIT License
def fix_variables(self, sess, pretrained_model):
    print('Fix Resnet V1 layers..')
    with tf.variable_scope('Fix_Resnet_V1') as scope:
        with tf.device("/cpu:0"):
            # fix RGB to BGR
            conv1_rgb = tf.get_variable("conv1_rgb", [7, 7, 3, 64], trainable=False)
            restorer_fc = tf.train.Saver({self._resnet_scope + "/conv1/weights": conv1_rgb})
            restorer_fc.restore(sess, pretrained_model)

            sess.run(tf.assign(self._variables_to_fix[self._resnet_scope + '/conv1/weights:0'],
                               tf.reverse(conv1_rgb, [False, False, True, False])))
Example #21
Source File: resnet_v1.py From SSH-TensorFlow with MIT License
def fix_variables(self, sess, pretrained_model):
    print('Fix Resnet V1 layers..')
    with tf.variable_scope('Fix_Resnet_V1') as scope:
        with tf.device("/cpu:0"):
            # fix RGB to BGR
            conv1_rgb = tf.get_variable("conv1_rgb", [7, 7, 3, 64], trainable=False)
            restorer_fc = tf.train.Saver({self._scope + "/conv1/weights": conv1_rgb})
            restorer_fc.restore(sess, pretrained_model)

            sess.run(tf.assign(self._variables_to_fix[self._scope + '/conv1/weights:0'],
                               tf.reverse(conv1_rgb, [2])))
Example #22
Source File: layers.py From face_landmark_dnn with MIT License
def AffineTransformLayer(Image, Param):
    '''
    Image: [N, IMGSIZE, IMGSIZE, 2]
    Param: [N, 6]
    return: [N, IMGSIZE, IMGSIZE, 2]
    '''
    A = tf.reshape(Param[:, 0:4], (-1, 2, 2))
    T = tf.reshape(Param[:, 4:6], (-1, 1, 2))

    A = tf.matrix_inverse(A)
    T = tf.matmul(-T, A)
    T = tf.reverse(T, (-1,))
    A = tf.matrix_transpose(A)

    def affine_transform(I, A, T):
        I = tf.reshape(I, [IMGSIZE, IMGSIZE])
        SrcPixels = tf.matmul(tf.reshape(Pixels, [IMGSIZE * IMGSIZE, 2]), A) + T
        SrcPixels = tf.clip_by_value(SrcPixels, 0, IMGSIZE - 2)

        outPixelsMinMin = tf.to_float(tf.to_int32(SrcPixels))
        dxdy = SrcPixels - outPixelsMinMin
        dx = dxdy[:, 0]
        dy = dxdy[:, 1]

        outPixelsMinMin = tf.reshape(tf.to_int32(outPixelsMinMin), [IMGSIZE * IMGSIZE, 2])
        outPixelsMaxMin = tf.reshape(outPixelsMinMin + [1, 0], [IMGSIZE * IMGSIZE, 2])
        outPixelsMinMax = tf.reshape(outPixelsMinMin + [0, 1], [IMGSIZE * IMGSIZE, 2])
        outPixelsMaxMax = tf.reshape(outPixelsMinMin + [1, 1], [IMGSIZE * IMGSIZE, 2])

        OutImage = (1 - dx) * (1 - dy) * tf.gather_nd(I, outPixelsMinMin) \
            + dx * (1 - dy) * tf.gather_nd(I, outPixelsMaxMin) \
            + (1 - dx) * dy * tf.gather_nd(I, outPixelsMinMax) \
            + dx * dy * tf.gather_nd(I, outPixelsMaxMax)
        return tf.reshape(OutImage, [IMGSIZE, IMGSIZE, 1])

    return tf.map_fn(lambda args: affine_transform(args[0], args[1], args[2]),
                     (Image, A, T), dtype=tf.float32)
Example #23
Source File: mobilenet_v2.py From SSH-TensorFlow with MIT License
def fix_variables(self, sess, pretrained_model):
    print('Fix MobileNet V2 layers..')
    with tf.variable_scope('Fix_MobileNet_V2') as scope:
        with tf.device("/cpu:0"):
            # fix RGB to BGR, and match the scale by (255.0 / 2.0)
            Conv_rgb = tf.get_variable("Conv_rgb",
                                       [3, 3, 3, max(int(32 * self._depth_multiplier), 8)],
                                       trainable=False)
            restorer_fc = tf.train.Saver({self._scope + "/Conv/weights": Conv_rgb})
            restorer_fc.restore(sess, pretrained_model)

            sess.run(tf.assign(self._variables_to_fix[self._scope + "/Conv/weights:0"],
                               tf.reverse(Conv_rgb / (255.0 / 2.0), [2])))
Example #24
Source File: lip_reader.py From LIP_JPPNet with MIT License
def image_mirroring(img, label, label_rev, heatmap, heatmap_rev):
    """
    Randomly mirrors the images.

    Args:
        img: Training image to mirror.
        label: Segmentation mask to mirror.
    """
    distort_left_right_random = tf.random_uniform([1], 0, 1.0, dtype=tf.float32)[0]
    mirror = tf.less(tf.stack([1.0, distort_left_right_random, 1.0]), 0.5)
    mirror = tf.boolean_mask([0, 1, 2], mirror)
    img = tf.reverse(img, mirror)

    flag = tf.less(distort_left_right_random, 0.5)
    mask = tf.stack([tf.logical_not(flag), flag])

    label_and_rev = tf.stack([label, label_rev])
    label_ = tf.boolean_mask(label_and_rev, mask)
    label_ = tf.reshape(label_, tf.shape(label))

    heatmap_and_rev = tf.stack([heatmap, heatmap_rev])
    heatmap_ = tf.boolean_mask(heatmap_and_rev, mask)
    heatmap_ = tf.reshape(heatmap_, tf.shape(heatmap))

    return img, label_, heatmap_
Example #25
Source File: adv_utils.py From generative_adversary with GNU General Public License v3.0
def random_flip_left_right(images):
    images_flipped = tf.reverse(images, axis=[2])
    flip = tf.cast(tf.contrib.distributions.Bernoulli(
        probs=tf.ones((tf.shape(images)[0],)) * 0.5).sample(), tf.bool)
    final_images = tf.where(flip, x=images, y=images_flipped)
    return final_images
Example #26
Source File: resnet_v1_noise.py From RGB-N with MIT License
def fix_variables(self, sess, pretrained_model):
    print('not Fix Resnet V1 layers..')
    with tf.variable_scope('Fix_Resnet_V1') as scope:
        with tf.device("/cpu:0"):
            # fix RGB to BGR
            conv1_rgb = tf.get_variable("conv1_rgb", [7, 7, 3, 64], trainable=True)
            restorer_fc = tf.train.Saver({self._resnet_scope + "/conv1/weights": conv1_rgb})
            restorer_fc.restore(sess, pretrained_model)

            sess.run(tf.assign(self._variables_to_fix[self._resnet_scope + '/conv1/weights:0'],
                               tf.reverse(conv1_rgb, [False, False, True, False])))
Example #27
Source File: resnet_fusion_noise.py From RGB-N with MIT License
def fix_variables(self, sess, pretrained_model):
    print('not Fix Resnet V1 layers..')
    with tf.variable_scope('Fix_Resnet_V1') as scope:
        with tf.device("/cpu:0"):
            # fix RGB to BGR
            conv1_rgb = tf.get_variable("conv1_rgb", [7, 7, 3, 64], trainable=False)
            restorer_fc = tf.train.Saver({self._resnet_scope + "/conv1/weights": conv1_rgb})
            restorer_fc.restore(sess, pretrained_model)

            sess.run(tf.assign(self._variables_to_fix[self._resnet_scope + '/conv1/weights:0'],
                               tf.reverse(conv1_rgb, [False, False, True, False])))
Example #28
Source File: ppo.py From BERT with Apache License 2.0
def discounted_rewards(reward, done, gae_gamma, end_values):
    """Discounted rewards."""
    not_done = tf.expand_dims(1 - tf.cast(done, tf.float32), axis=2)
    end_values = end_values * not_done[-1, :, :]
    return_ = tf.scan(
        lambda agg, cur: cur + gae_gamma * agg,
        tf.expand_dims(reward, axis=2) * not_done,
        initializer=end_values,
        reverse=True,
        back_prop=False,
        parallel_iterations=2)
    return tf.check_numerics(return_, "return")
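Unlike Examples #5 and #16, which wrap tf.scan in two tf.reverse calls, this version uses tf.scan's own reverse=True argument to run the recurrence backward in a single call. The two forms compute the same thing; a sketch of the equivalence (rewards, gamma, and init are illustrative placeholders):

# double-reverse form, as in the earlier PPO examples
out_a = tf.reverse(
    tf.scan(lambda agg, r: r + gamma * agg, tf.reverse(rewards, [0]),
            initializer=init), [0])
# built-in reversed scan, as in this example
out_b = tf.scan(lambda agg, r: r + gamma * agg, rewards,
                initializer=init, reverse=True)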
Example #29
Source File: neural_stack.py From BERT with Apache License 2.0
def build_read_mask(self):
    """Uses mask_pos_lt() instead of mask_pos_gt() to reverse read values.

    Returns:
        A tf.float32 tensor of shape [1, memory_size, memory_size].
    """
    return common_layers.mask_pos_lt(self._memory_size, self._memory_size)
Example #30
Source File: resnet_fusion.py From RGB-N with MIT License
def fix_variables(self, sess, pretrained_model):
    print('not Fix Resnet V1 layers..')
    with tf.variable_scope('Fix_Resnet_V1') as scope:
        with tf.device("/cpu:0"):
            # fix RGB to BGR
            conv1_rgb = tf.get_variable("conv1_rgb", [7, 7, 3, 64], trainable=False)
            restorer_fc = tf.train.Saver({self._resnet_scope + "/conv1/weights": conv1_rgb})
            restorer_fc.restore(sess, pretrained_model)

            sess.run(tf.assign(self._variables_to_fix[self._resnet_scope + '/conv1/weights:0'],
                               tf.reverse(conv1_rgb, [False, False, True, False])))