Python tensorflow.control_dependencies() Examples
The following code examples show how tensorflow.control_dependencies() is used in open-source TensorFlow projects. The source file, project, and license are listed above each example.
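Before the project examples, here is a minimal, self-contained sketch of the pattern they all share, assuming TensorFlow 1.x graph mode (tf.compat.v1 in TensorFlow 2.x); the variable and op names are illustrative only. Ops created inside a tf.control_dependencies block only run after the listed ops have run.

import tensorflow as tf

counter = tf.Variable(0, name='counter')
increment = tf.assign_add(counter, 1)

# Ops created inside this block run only after `increment` has run.
with tf.control_dependencies([increment]):
    # A new op (here tf.identity) must be created inside the block;
    # reusing a tensor created outside would not pick up the dependency.
    read_after_increment = tf.identity(counter)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(read_after_increment))  # prints 1: the increment ran first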
Example #1
Source File: tfutil.py From disentangling_conditional_gans with MIT License

def init_uninited_vars(vars=None):
    if vars is None: vars = tf.global_variables()
    test_vars = []; test_ops = []
    with tf.control_dependencies(None):  # ignore surrounding control_dependencies
        for var in vars:
            assert is_tf_expression(var)
            try:
                tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/IsVariableInitialized:0'))
            except KeyError:
                # Op does not exist => variable may be uninitialized.
                test_vars.append(var)
                with absolute_name_scope(var.name.split(':')[0]):
                    test_ops.append(tf.is_variable_initialized(var))
    init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
    run([var.initializer for var in init_vars])

#----------------------------------------------------------------------------
# Set the values of given tf.Variables.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
# tfutil.run([tf.assign(var, value) for var, value in var_to_value_dict.items()])
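A note on the tf.control_dependencies(None) call above: passing None clears any control dependencies inherited from enclosing blocks, which is why the helper can build its initialization ops safely no matter where it is called from. A small sketch of that behaviour, assuming TensorFlow 1.x (the op names are illustrative):

import tensorflow as tf

outer_dep = tf.no_op(name='outer_dep')
x = tf.constant(1.0)

with tf.control_dependencies([outer_dep]):
    inside = tf.identity(x)        # picks up the dependency on outer_dep
    with tf.control_dependencies(None):
        cleared = tf.identity(x)   # the inherited dependency is dropped

print([op.name for op in inside.op.control_inputs])   # ['outer_dep']
print([op.name for op in cleared.op.control_inputs])  # []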
Example #2
Source File: memory.py From DOTA_models with Apache License 2.0

def make_update_op(self, upd_idxs, upd_keys, upd_vals,
                   batch_size, use_recent_idx, intended_output):
    """Function that creates all the update ops."""
    mem_age_incr = self.mem_age.assign_add(tf.ones([self.memory_size],
                                                   dtype=tf.float32))
    with tf.control_dependencies([mem_age_incr]):
        mem_age_upd = tf.scatter_update(
            self.mem_age, upd_idxs, tf.zeros([batch_size], dtype=tf.float32))

    mem_key_upd = tf.scatter_update(
        self.mem_keys, upd_idxs, upd_keys)
    mem_val_upd = tf.scatter_update(
        self.mem_vals, upd_idxs, upd_vals)

    if use_recent_idx:
        recent_idx_upd = tf.scatter_update(
            self.recent_idx, intended_output, upd_idxs)
    else:
        recent_idx_upd = tf.group()

    return tf.group(mem_age_upd, mem_key_upd, mem_val_upd, recent_idx_upd)
Example #3
Source File: data_provider.py From DOTA_models with Apache License 2.0

def central_crop(image, crop_size):
    """Returns a central crop for the specified size of an image.

    Args:
        image: A tensor with shape [height, width, channels]
        crop_size: A tuple (crop_width, crop_height)

    Returns:
        A tensor of shape [crop_height, crop_width, channels].
    """
    with tf.variable_scope('CentralCrop'):
        target_width, target_height = crop_size
        image_height, image_width = tf.shape(image)[0], tf.shape(image)[1]
        assert_op1 = tf.Assert(
            tf.greater_equal(image_height, target_height),
            ['image_height < target_height', image_height, target_height])
        assert_op2 = tf.Assert(
            tf.greater_equal(image_width, target_width),
            ['image_width < target_width', image_width, target_width])
        with tf.control_dependencies([assert_op1, assert_op2]):
            offset_width = (image_width - target_width) / 2
            offset_height = (image_height - target_height) / 2
            return tf.image.crop_to_bounding_box(image, offset_height, offset_width,
                                                 target_height, target_width)
Example #4
Source File: algorithm.py From soccer-matlab with BSD 2-Clause "Simplified" License

def _define_experience(self, observ, action, reward):
    """Implement the branch of experience() entered during training."""
    update_filters = tf.summary.merge([
        self._observ_filter.update(observ),
        self._reward_filter.update(reward)])
    with tf.control_dependencies([update_filters]):
        if self._config.train_on_agent_action:
            # NOTE: Doesn't seem to change much.
            action = self._last_action
        batch = observ, action, self._last_mean, self._last_logstd, reward
        append = self._episodes.append(batch, tf.range(len(self._batch_env)))
    with tf.control_dependencies([append]):
        norm_observ = self._observ_filter.transform(observ)
        norm_reward = tf.reduce_mean(self._reward_filter.transform(reward))
        # pylint: disable=g-long-lambda
        summary = tf.cond(self._should_log, lambda: tf.summary.merge([
            update_filters,
            self._observ_filter.summary(),
            self._reward_filter.summary(),
            tf.summary.scalar('memory_size', self._memory_index),
            tf.summary.histogram('normalized_observ', norm_observ),
            tf.summary.histogram('action', self._last_action),
            tf.summary.scalar('normalized_reward', norm_reward)]), str)
        return summary
Example #5
Source File: algorithm.py From soccer-matlab with BSD 2-Clause "Simplified" License

def _update_value_step(self, observ, reward, length):
    """Compute the current value loss and perform a gradient update step.

    Args:
        observ: Sequences of observations.
        reward: Sequences of reward.
        length: Batch of sequence lengths.

    Returns:
        Tuple of loss tensor and summary tensor.
    """
    loss, summary = self._value_loss(observ, reward, length)
    gradients, variables = (
        zip(*self._value_optimizer.compute_gradients(loss)))
    optimize = self._value_optimizer.apply_gradients(
        zip(gradients, variables))
    summary = tf.summary.merge([
        summary,
        tf.summary.scalar('gradient_norm', tf.global_norm(gradients)),
        utility.gradient_summaries(
            zip(gradients, variables), dict(value=r'.*'))])
    with tf.control_dependencies([optimize]):
        return [tf.identity(loss), tf.identity(summary)]
Example #6
Source File: graphs.py From DOTA_models with Apache License 2.0

def _lm_loss(self,
             inputs,
             emb_key='lm_embedded',
             lstm_layer='lstm',
             lm_loss_layer='lm_loss',
             loss_name='lm_loss',
             compute_loss=True):
    embedded = self.layers['embedding'](inputs.tokens)
    self.tensors[emb_key] = embedded
    lstm_out, next_state = self.layers[lstm_layer](embedded, inputs.state,
                                                   inputs.length)
    if compute_loss:
        loss = self.layers[lm_loss_layer](
            [lstm_out, inputs.labels, inputs.weights])
        with tf.control_dependencies([inputs.save_state(next_state)]):
            loss = tf.identity(loss)
            tf.summary.scalar(loss_name, loss)

    return loss
Example #7
Source File: memory.py From soccer-matlab with BSD 2-Clause "Simplified" License

def replace(self, episodes, length, rows=None):
    """Replace full episodes.

    Args:
        episodes: Tuple of transition quantities with batch and time dimensions.
        length: Batch of sequence lengths.
        rows: Episodes to replace, defaults to all.

    Returns:
        Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity, message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
        assert_max_length = tf.assert_less_equal(
            length, self._max_length, message='max length exceeded')
    replace_ops = []
    with tf.control_dependencies([assert_max_length]):
        for buffer_, elements in zip(self._buffers, episodes):
            replace_op = tf.scatter_update(buffer_, rows, elements)
            replace_ops.append(replace_op)
    with tf.control_dependencies(replace_ops):
        return tf.scatter_update(self._length, rows, length)
Example #8
Source File: component.py From DOTA_models with Apache License 2.0

def build_structured_training(self, state, network_states):
    """Builds a beam search based training loop for this component.

    The default implementation builds a dummy graph and raises a
    TensorFlow runtime exception to indicate that structured training
    is not implemented.

    Args:
        state: MasterState from the 'AdvanceMaster' op that advances the
            underlying master to this component.
        network_states: dictionary of component NetworkState objects.

    Returns:
        (handle, cost, correct, total) -- These are TF ops corresponding
        to the final handle after unrolling, the total cost, and the total
        number of actions. Since the number of correctly predicted actions
        is not applicable in the structured training setting, a dummy value
        should be returned.
    """
    del network_states  # Unused.
    with tf.control_dependencies([tf.Assert(False, ['Not implemented.'])]):
        handle = tf.identity(state.handle)
    cost = tf.constant(0.)
    correct, total = tf.constant(0), tf.constant(0)
    return handle, cost, correct, total
Example #9
Source File: in_graph_batch_env.py From soccer-matlab with BSD 2-Clause "Simplified" License

def reset(self, indices=None):
    """Reset the batch of environments.

    Args:
        indices: The batch indices of the environments to reset; defaults to all.

    Returns:
        Batch tensor of the new observations.
    """
    if indices is None:
        indices = tf.range(len(self._batch_env))
    observ_dtype = self._parse_dtype(self._batch_env.observation_space)
    observ = tf.py_func(
        self._batch_env.reset, [indices], observ_dtype, name='reset')
    observ = tf.check_numerics(observ, 'observ')
    reward = tf.zeros_like(indices, tf.float32)
    done = tf.zeros_like(indices, tf.bool)
    with tf.control_dependencies([
            tf.scatter_update(self._observ, indices, observ),
            tf.scatter_update(self._reward, indices, reward),
            tf.scatter_update(self._done, indices, done)]):
        return tf.identity(observ)
Example #10
Source File: algorithm.py From soccer-matlab with BSD 2-Clause "Simplified" License

def begin_episode(self, agent_indices):
    """Reset the recurrent states and stored episode.

    Args:
        agent_indices: Tensor containing current batch indices.

    Returns:
        Summary tensor.
    """
    with tf.name_scope('begin_episode/'):
        if self._last_state is None:
            reset_state = tf.no_op()
        else:
            reset_state = utility.reinit_nested_vars(
                self._last_state, agent_indices)
        reset_buffer = self._episodes.clear(agent_indices)
        with tf.control_dependencies([reset_state, reset_buffer]):
            return tf.constant('')
Example #11
Source File: losses.py From DOTA_models with Apache License 2.0

def mmd_loss(source_samples, target_samples, weight, scope=None):
    """Adds a similarity loss term, the MMD between two representations.

    This Maximum Mean Discrepancy (MMD) loss is calculated with a number of
    different Gaussian kernels.

    Args:
        source_samples: a tensor of shape [num_samples, num_features].
        target_samples: a tensor of shape [num_samples, num_features].
        weight: the weight of the MMD loss.
        scope: optional name scope for summary tags.

    Returns:
        a scalar tensor representing the MMD loss value.
    """
    sigmas = [
        1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100,
        1e3, 1e4, 1e5, 1e6
    ]
    gaussian_kernel = partial(
        utils.gaussian_kernel_matrix, sigmas=tf.constant(sigmas))

    loss_value = maximum_mean_discrepancy(
        source_samples, target_samples, kernel=gaussian_kernel)
    loss_value = tf.maximum(1e-4, loss_value) * weight
    assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value])
    with tf.control_dependencies([assert_op]):
        tag = 'MMD Loss'
        if scope:
            tag = scope + tag
        tf.summary.scalar(tag, loss_value)
        tf.losses.add_loss(loss_value)

    return loss_value
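The maximum_mean_discrepancy() helper called above is not shown on this page. As a rough guide, here is a hedged sketch of the standard multi-kernel MMD estimator it corresponds to; the project's own utils module may differ in details such as name scoping:

import tensorflow as tf

def maximum_mean_discrepancy(x, y, kernel):
    """Hedged sketch: biased estimate of MMD^2 between samples x and y."""
    cost = tf.reduce_mean(kernel(x, x))
    cost += tf.reduce_mean(kernel(y, y))
    cost -= 2 * tf.reduce_mean(kernel(x, y))
    # Guard against small negative values caused by numerical imprecision,
    # mirroring the tf.where(cost > 0, cost, 0) idiom used elsewhere on this page.
    return tf.where(cost > 0, cost, 0)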
Example #12
Source File: tfutil.py From disentangling_conditional_gans with MIT License

def autosummary(name, value):
    id = name.replace('/', '_')
    if is_tf_expression(value):
        with tf.name_scope('summary_' + id), tf.device(value.device):
            update_op = _create_autosummary_var(name, value)
            with tf.control_dependencies([update_op]):
                return tf.identity(value)
    else:  # python scalar or numpy array
        if name not in _autosummary_immediate:
            with absolute_name_scope('Autosummary/' + id), tf.device(None), tf.control_dependencies(None):
                update_value = tf.placeholder(tf.float32)
                update_op = _create_autosummary_var(name, update_value)
                _autosummary_immediate[name] = update_op, update_value
        update_op, update_value = _autosummary_immediate[name]
        run(update_op, {update_value: np.float32(value)})
        return value

# Create the necessary ops to include autosummaries in TensorBoard report.
# Note: This should be done only once per graph.
Example #13
Source File: tfutil.py From disentangling_conditional_gans with MIT License

def finalize_autosummaries():
    global _autosummary_finalized
    if _autosummary_finalized:
        return
    _autosummary_finalized = True
    init_uninited_vars([var for vars in _autosummary_vars.values() for var in vars])
    with tf.device(None), tf.control_dependencies(None):
        for name, vars in _autosummary_vars.items():
            id = name.replace('/', '_')
            with absolute_name_scope('Autosummary/' + id):
                sum = tf.add_n(vars)
                avg = sum[0] / sum[1]
                with tf.control_dependencies([avg]):  # read before resetting
                    reset_ops = [tf.assign(var, tf.zeros(2)) for var in vars]
                    with tf.name_scope(None), tf.control_dependencies(reset_ops):  # reset before reporting
                        tf.summary.scalar(name, avg)

# Internal helper for creating autosummary accumulators.
Example #14
Source File: tfutil.py From disentangling_conditional_gans with MIT License

def _create_autosummary_var(name, value_expr):
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    if v.shape.ndims is 0:
        v = [v, np.float32(1.0)]
    elif v.shape.ndims is 1:
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2))  # [numerator, denominator]
        update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op

#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call.
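The save_summaries() function that the closing comment refers to is not included on this page. Below is a hedged sketch of what such a helper could look like, based only on that comment and on finalize_autosummaries() from Example #13; the helper name, the lazy one-time merge, and the reuse of this project's run() wrapper are assumptions, not the project's exact implementation:

import tensorflow as tf

_merged_summary_op = None  # assumed module-level cache

def save_summaries(filewriter, global_step=None):
    global _merged_summary_op
    if _merged_summary_op is None:
        finalize_autosummaries()  # Example #13, assumed to be in scope
        with tf.device(None), tf.control_dependencies(None):
            _merged_summary_op = tf.summary.merge_all()  # merge once, reuse afterwards
    filewriter.add_summary(run(_merged_summary_op), global_step)  # run() as in Example #1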
Example #15
Source File: losses.py From DOTA_models with Apache License 2.0

def difference_loss(private_samples, shared_samples, weight=1.0, name=''):
    """Adds the difference loss between the private and shared representations.

    Args:
        private_samples: a tensor of shape [num_samples, num_features].
        shared_samples: a tensor of shape [num_samples, num_features].
        weight: the weight of the incoherence loss.
        name: the name of the tf summary.
    """
    private_samples -= tf.reduce_mean(private_samples, 0)
    shared_samples -= tf.reduce_mean(shared_samples, 0)

    private_samples = tf.nn.l2_normalize(private_samples, 1)
    shared_samples = tf.nn.l2_normalize(shared_samples, 1)

    correlation_matrix = tf.matmul(
        private_samples, shared_samples, transpose_a=True)

    cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight
    cost = tf.where(cost > 0, cost, 0, name='value')

    tf.summary.scalar('losses/Difference Loss {}'.format(name), cost)
    assert_op = tf.Assert(tf.is_finite(cost), [cost])
    with tf.control_dependencies([assert_op]):
        tf.losses.add_loss(cost)


################################################################################
# TASK LOSS
################################################################################
Example #16
Source File: memory.py From soccer-matlab with BSD 2-Clause "Simplified" License

def append(self, transitions, rows=None):
    """Append a batch of transitions to rows of the memory.

    Args:
        transitions: Tuple of transition quantities with batch dimension.
        rows: Episodes to append to, defaults to all.

    Returns:
        Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity, message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
        assert_max_length = tf.assert_less(
            tf.gather(self._length, rows), self._max_length,
            message='max length exceeded')
    append_ops = []
    with tf.control_dependencies([assert_max_length]):
        for buffer_, elements in zip(self._buffers, transitions):
            timestep = tf.gather(self._length, rows)
            indices = tf.stack([rows, timestep], 1)
            append_ops.append(tf.scatter_nd_update(buffer_, indices, elements))
    with tf.control_dependencies(append_ops):
        episode_mask = tf.reduce_sum(tf.one_hot(
            rows, self._capacity, dtype=tf.int32), 0)
        return self._length.assign_add(episode_mask)
Example #17
Source File: normalize.py From soccer-matlab with BSD 2-Clause "Simplified" License

def update(self, value):
    """Update the mean and variance estimates.

    Args:
        value: Batch or single value tensor.

    Returns:
        Summary tensor.
    """
    with tf.name_scope(self._name + '/update'):
        if value.shape.ndims == self._mean.shape.ndims:
            # Add a batch dimension if necessary.
            value = value[None, ...]
        count = tf.shape(value)[0]
        with tf.control_dependencies([self._count.assign_add(count)]):
            step = tf.cast(self._count, tf.float32)
            mean_delta = tf.reduce_sum(value - self._mean[None, ...], 0)
            new_mean = self._mean + mean_delta / step
            new_mean = tf.cond(self._count > 1, lambda: new_mean, lambda: value[0])
            var_delta = (
                value - self._mean[None, ...]) * (value - new_mean[None, ...])
            new_var_sum = self._var_sum + tf.reduce_sum(var_delta, 0)
        with tf.control_dependencies([new_mean, new_var_sum]):
            update = self._mean.assign(new_mean), self._var_sum.assign(new_var_sum)
        with tf.control_dependencies(update):
            if value.shape.ndims == 1:
                value = tf.reduce_mean(value)
            return self._summary('value', tf.reduce_mean(value))
Example #18
Source File: algorithm.py From soccer-matlab with BSD 2-Clause "Simplified" License

def _update_step(
        self, observ, action, old_mean, old_logstd, reward, advantage, length):
    """Compute the current combined loss and perform a gradient update step.

    Args:
        observ: Sequences of observations.
        action: Sequences of actions.
        old_mean: Sequences of action means of the behavioral policy.
        old_logstd: Sequences of action log stddevs of the behavioral policy.
        reward: Sequences of reward.
        advantage: Sequences of advantages.
        length: Batch of sequence lengths.

    Returns:
        Tuple of value loss, policy loss, and summary tensor.
    """
    value_loss, value_summary = self._value_loss(observ, reward, length)
    network = self._network(observ, length)
    policy_loss, policy_summary = self._policy_loss(
        network.mean, network.logstd, old_mean, old_logstd, action,
        advantage, length)
    value_gradients, value_variables = (
        zip(*self._optimizer.compute_gradients(value_loss)))
    policy_gradients, policy_variables = (
        zip(*self._optimizer.compute_gradients(policy_loss)))
    all_gradients = value_gradients + policy_gradients
    all_variables = value_variables + policy_variables
    optimize = self._optimizer.apply_gradients(
        zip(all_gradients, all_variables))
    summary = tf.summary.merge([
        value_summary, policy_summary,
        tf.summary.scalar(
            'value_gradient_norm', tf.global_norm(value_gradients)),
        tf.summary.scalar(
            'policy_gradient_norm', tf.global_norm(policy_gradients)),
        utility.gradient_summaries(
            zip(value_gradients, value_variables), dict(value=r'.*')),
        utility.gradient_summaries(
            zip(policy_gradients, policy_variables), dict(policy=r'.*'))])
    with tf.control_dependencies([optimize]):
        return [tf.identity(x) for x in (value_loss, policy_loss, summary)]
Example #19
Source File: exporter_test.py From DOTA_models with Apache License 2.0

def postprocess(self, prediction_dict):
    with tf.control_dependencies(prediction_dict.values()):
        postprocessed_tensors = {
            'detection_boxes': tf.constant([[0.0, 0.0, 0.5, 0.5],
                                            [0.5, 0.5, 0.8, 0.8]], tf.float32),
            'detection_scores': tf.constant([[0.7, 0.6]], tf.float32),
            'detection_classes': tf.constant([[0, 1]], tf.float32),
            'num_detections': tf.constant([2], tf.float32)
        }
        if self._add_detection_masks:
            postprocessed_tensors['detection_masks'] = tf.constant(
                np.arange(32).reshape([2, 4, 4]), tf.float32)
    return postprocessed_tensors
Example #20
Source File: algorithm.py From soccer-matlab with BSD 2-Clause "Simplified" License

def perform(self, observ):
    """Compute batch of actions and a summary for a batch of observation.

    Args:
        observ: Tensor of a batch of observations for all agents.

    Returns:
        Tuple of action batch tensor and summary tensor.
    """
    with tf.name_scope('perform/'):
        observ = self._observ_filter.transform(observ)
        network = self._network(
            observ[:, None], tf.ones(observ.shape[0]), self._last_state)
        action = tf.cond(
            self._is_training, network.policy.sample, lambda: network.mean)
        logprob = network.policy.log_prob(action)[:, 0]
        # pylint: disable=g-long-lambda
        summary = tf.cond(self._should_log, lambda: tf.summary.merge([
            tf.summary.histogram('mean', network.mean[:, 0]),
            tf.summary.histogram('std', tf.exp(network.logstd[:, 0])),
            tf.summary.histogram('action', action[:, 0]),
            tf.summary.histogram('logprob', logprob)]), str)
        # Remember current policy to append to memory in the experience callback.
        with tf.control_dependencies([
                utility.assign_nested_vars(self._last_state, network.state),
                self._last_action.assign(action[:, 0]),
                self._last_mean.assign(network.mean[:, 0]),
                self._last_logstd.assign(network.logstd[:, 0])]):
            return tf.check_numerics(action[:, 0], 'action'), tf.identity(summary)
Example #21
Source File: algorithm.py From soccer-matlab with BSD 2-Clause "Simplified" License

def begin_episode(self, agent_indices):
    """Reset the recurrent states and stored episode.

    Args:
        agent_indices: 1D tensor of batch indices for agents starting an episode.

    Returns:
        Summary tensor.
    """
    with tf.name_scope('begin_episode/'):
        reset_state = utility.reinit_nested_vars(self._last_state, agent_indices)
        reset_buffer = self._episodes.clear(agent_indices)
        with tf.control_dependencies([reset_state, reset_buffer]):
            return tf.constant('')
Example #22
Source File: anchor_generator.py From DOTA_models with Apache License 2.0

def generate(self, feature_map_shape_list, **params):
    """Generates a collection of bounding boxes to be used as anchors.

    TODO: remove **params from argument list and make stride and offsets (for
    multiple_grid_anchor_generator) constructor arguments.

    Args:
        feature_map_shape_list: list of (height, width) pairs in the format
            [(height_0, width_0), (height_1, width_1), ...] that the generated
            anchors must align with. Pairs can be provided as 1-dimensional
            integer tensors of length 2 or simply as tuples of integers.
        **params: parameters for anchor generation op

    Returns:
        boxes: a BoxList holding a collection of N anchor boxes

    Raises:
        ValueError: if the number of feature map shapes does not match the
            length of NumAnchorsPerLocation.
    """
    if self.check_num_anchors and (
            len(feature_map_shape_list) != len(self.num_anchors_per_location())):
        raise ValueError('Number of feature maps is expected to equal the length '
                         'of `num_anchors_per_location`.')
    with tf.name_scope(self.name_scope()):
        anchors = self._generate(feature_map_shape_list, **params)
        if self.check_num_anchors:
            with tf.control_dependencies([
                    self._assert_correct_number_of_anchors(
                        anchors, feature_map_shape_list)]):
                anchors.set(tf.identity(anchors.get()))
        return anchors
Example #23
Source File: tfutil.py From disentangling_conditional_gans with MIT License

def set_vars(var_to_value_dict):
    ops = []
    feed_dict = {}
    for var, value in var_to_value_dict.items():
        assert is_tf_expression(var)
        try:
            setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/setter:0'))  # look for existing op
        except KeyError:
            with absolute_name_scope(var.name.split(':')[0]):
                with tf.control_dependencies(None):  # ignore surrounding control_dependencies
                    setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, 'new_value'), name='setter')  # create new setter
        ops.append(setter)
        feed_dict[setter.op.inputs[1]] = value
    run(ops, feed_dict)

#----------------------------------------------------------------------------
# Autosummary creates an identity op that internally keeps track of the input
# values and automatically shows up in TensorBoard. The reported value
# represents an average over input components. The average is accumulated
# constantly over time and flushed when save_summaries() is called.
#
# Notes:
# - The output tensor must be used as an input for something else in the
#   graph. Otherwise, the autosummary op will not get executed, and the average
#   value will not get accumulated.
# - It is perfectly fine to include autosummaries with the same name in
#   several places throughout the graph, even if they are executed concurrently.
# - It is ok to also pass in a python scalar or numpy array. In this case, it
#   is added to the average immediately.
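Putting the notes above together, here is a minimal hedged usage sketch for autosummary(): the returned identity op has to feed something that actually gets executed, otherwise the running average never accumulates. It assumes the project's tfutil module is importable; the variable, loss, and learning rate are placeholders for illustration:

import tensorflow as tf
import tfutil  # assumed importable from this project

w = tf.Variable(1.0)
x = tf.placeholder(tf.float32, shape=[None])
loss = tf.reduce_mean(tf.square(w * x))

# autosummary() returns tf.identity(loss) with a control dependency on the
# accumulator update, so using its result downstream keeps the summary alive.
loss = tfutil.autosummary('Loss/train', loss)
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss)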
Example #24
Source File: keypoint_ops.py From DOTA_models with Apache License 2.0

def to_absolute_coordinates(keypoints, height, width,
                            check_range=True, scope=None):
    """Converts normalized keypoint coordinates to absolute pixel coordinates.

    This function raises an assertion failed error when the maximum keypoint
    coordinate value is larger than 1.01 (in which case coordinates are already
    absolute).

    Args:
        keypoints: A tensor of shape [num_instances, num_keypoints, 2]
        height: Maximum value for y coordinate of absolute keypoint coordinates.
        width: Maximum value for x coordinate of absolute keypoint coordinates.
        check_range: If True, checks if the coordinates are normalized or not.
        scope: name scope.

    Returns:
        tensor of shape [num_instances, num_keypoints, 2] with absolute
        coordinates in terms of the image size.
    """
    with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
        height = tf.cast(height, tf.float32)
        width = tf.cast(width, tf.float32)

        # Ensure range of input keypoints is correct.
        if check_range:
            max_val = tf.reduce_max(keypoints)
            max_assert = tf.Assert(tf.greater_equal(1.01, max_val),
                                   ['maximum keypoint coordinate value is larger '
                                    'than 1.01: ', max_val])
            with tf.control_dependencies([max_assert]):
                width = tf.identity(width)

        return scale(keypoints, height, width)
Example #25
Source File: losses.py From DOTA_models with Apache License 2.0

def correlation_loss(source_samples, target_samples, weight, scope=None):
    """Adds a similarity loss term, the correlation between two representations.

    Args:
        source_samples: a tensor of shape [num_samples, num_features]
        target_samples: a tensor of shape [num_samples, num_features]
        weight: a scalar weight for the loss.
        scope: optional name scope for summary tags.

    Returns:
        a scalar tensor representing the correlation loss value.
    """
    with tf.name_scope('corr_loss'):
        source_samples -= tf.reduce_mean(source_samples, 0)
        target_samples -= tf.reduce_mean(target_samples, 0)

        source_samples = tf.nn.l2_normalize(source_samples, 1)
        target_samples = tf.nn.l2_normalize(target_samples, 1)

        source_cov = tf.matmul(tf.transpose(source_samples), source_samples)
        target_cov = tf.matmul(tf.transpose(target_samples), target_samples)

        corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight

    assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss])
    with tf.control_dependencies([assert_op]):
        tag = 'Correlation Loss'
        if scope:
            tag = scope + tag
        tf.summary.scalar(tag, corr_loss)
        tf.losses.add_loss(corr_loss)

    return corr_loss
Example #26
Source File: box_list_ops.py From DOTA_models with Apache License 2.0

def sort_by_field(boxlist, field, order=SortOrder.descend, scope=None):
    """Sort boxes and associated fields according to a scalar field.

    A common use case is reordering the boxes according to descending scores.

    Args:
        boxlist: BoxList holding N boxes.
        field: A BoxList field for sorting and reordering the BoxList.
        order: (Optional) descend or ascend. Default is descend.
        scope: name scope.

    Returns:
        sorted_boxlist: A sorted BoxList with the field in the specified order.

    Raises:
        ValueError: if specified field does not exist
        ValueError: if the order is not either descend or ascend
    """
    with tf.name_scope(scope, 'SortByField'):
        if order != SortOrder.descend and order != SortOrder.ascend:
            raise ValueError('Invalid sort order')

        field_to_sort = boxlist.get_field(field)
        if len(field_to_sort.shape.as_list()) != 1:
            raise ValueError('Field should have rank 1')

        num_boxes = boxlist.num_boxes()
        num_entries = tf.size(field_to_sort)
        length_assert = tf.Assert(
            tf.equal(num_boxes, num_entries),
            ['Incorrect field size: actual vs expected.', num_entries, num_boxes])

        with tf.control_dependencies([length_assert]):
            # TODO: Remove with tf.device when top_k operation runs correctly on GPU.
            with tf.device('/cpu:0'):
                _, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True)

        if order == SortOrder.ascend:
            sorted_indices = tf.reverse_v2(sorted_indices, [0])

        return gather(boxlist, sorted_indices)
Example #27
Source File: box_list_ops.py From DOTA_models with Apache License 2.0

def to_normalized_coordinates(boxlist, height, width,
                              check_range=True, scope=None):
    """Converts absolute box coordinates to normalized coordinates in [0, 1].

    Usually one uses the dynamic shape of the image or conv-layer tensor:
        boxlist = box_list_ops.to_normalized_coordinates(boxlist,
                                                         tf.shape(images)[1],
                                                         tf.shape(images)[2]),

    This function raises an assertion failed error at graph execution time
    when the maximum coordinate is smaller than 1.01 (which means that
    coordinates are already normalized). The value 1.01 is to deal with small
    rounding errors.

    Args:
        boxlist: BoxList with coordinates in terms of pixel-locations.
        height: Maximum value for height of absolute box coordinates.
        width: Maximum value for width of absolute box coordinates.
        check_range: If True, checks if the coordinates are normalized or not.
        scope: name scope.

    Returns:
        boxlist with normalized coordinates in [0, 1].
    """
    with tf.name_scope(scope, 'ToNormalizedCoordinates'):
        height = tf.cast(height, tf.float32)
        width = tf.cast(width, tf.float32)

        if check_range:
            max_val = tf.reduce_max(boxlist.get())
            max_assert = tf.Assert(tf.greater(max_val, 1.01),
                                   ['max value is lower than 1.01: ', max_val])
            with tf.control_dependencies([max_assert]):
                width = tf.identity(width)

        return scale(boxlist, 1 / height, 1 / width)
Example #28
Source File: box_list_ops.py From DOTA_models with Apache License 2.0

def to_absolute_coordinates(boxlist, height, width,
                            check_range=True, scope=None):
    """Converts normalized box coordinates to absolute pixel coordinates.

    This function raises an assertion failed error when the maximum box
    coordinate value is larger than 1.01 (in which case coordinates are
    already absolute).

    Args:
        boxlist: BoxList with coordinates in range [0, 1].
        height: Maximum value for height of absolute box coordinates.
        width: Maximum value for width of absolute box coordinates.
        check_range: If True, checks if the coordinates are normalized or not.
        scope: name scope.

    Returns:
        boxlist with absolute coordinates in terms of the image size.
    """
    with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
        height = tf.cast(height, tf.float32)
        width = tf.cast(width, tf.float32)

        # Ensure range of input boxes is correct.
        if check_range:
            box_maximum = tf.reduce_max(boxlist.get())
            max_assert = tf.Assert(tf.greater_equal(1.01, box_maximum),
                                   ['maximum box coordinate value is larger '
                                    'than 1.01: ', box_maximum])
            with tf.control_dependencies([max_assert]):
                width = tf.identity(width)

        return scale(boxlist, height, width)
Example #29
Source File: dsn.py From DOTA_models with Apache License 2.0

def add_task_loss(source_images, source_labels, basic_tower, params):
    """Adds a classification and/or pose estimation loss to the model.

    Args:
        source_images: images from the source domain, a tensor of size
            [batch_size, height, width, channels]
        source_labels: labels from the source domain, a tensor of size
            [batch_size], or a tuple of (quaternions, class_labels)
        basic_tower: a function that creates the single tower of the model.
        params: A dictionary of parameters. Expecting 'weight_decay',
            'pose_weight'.

    Returns:
        The source endpoints.

    Raises:
        RuntimeError: if basic tower does not support pose estimation.
    """
    with tf.variable_scope('towers'):
        source_logits, source_endpoints = basic_tower(
            source_images, weight_decay=params['weight_decay'], prefix='Source')

    if 'quaternions' in source_labels:  # We have pose estimation as well
        if 'quaternion_pred' not in source_endpoints:
            raise RuntimeError('Please use a model for estimation e.g. pose_mini')

        loss = losses.log_quaternion_loss(source_labels['quaternions'],
                                          source_endpoints['quaternion_pred'],
                                          params)

        assert_op = tf.Assert(tf.is_finite(loss), [loss])
        with tf.control_dependencies([assert_op]):
            quaternion_loss = loss
            tf.summary.histogram('log_quaternion_loss_hist', quaternion_loss)
            slim.losses.add_loss(quaternion_loss * params['pose_weight'])
            tf.summary.scalar('losses/quaternion_loss', quaternion_loss)

    classification_loss = tf.losses.softmax_cross_entropy(
        source_labels['classes'], source_logits)

    tf.summary.scalar('losses/classification_loss', classification_loss)
    return source_endpoints