Python tensorflow.python.ops.math_ops.logical_not() Examples
The following are code examples of tensorflow.python.ops.math_ops.logical_not(), collected from open-source projects. The source file, project, and license for each example are listed in the header above its code.
You may also want to check out all available functions and classes of the module tensorflow.python.ops.math_ops.
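math_ops.logical_not computes the elementwise logical NOT of a boolean tensor. As a quick illustration, here is a minimal sketch using the public tf.math.logical_not alias (the internal math_ops.logical_not used in the examples below resolves to the same op); it assumes TensorFlow 2.x eager execution:

import tensorflow as tf

mask = tf.constant([True, False, True])
print(tf.math.logical_not(mask).numpy())  # -> [False  True False]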
Example #1
Source File: tpu_estimator.py From Chinese-XLNet with Apache License 2.0 | 6 votes |
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def cond(scalar_stopping_signal):
    return math_ops.logical_not(
        _StopSignals.should_stop(scalar_stopping_signal))

  def computation(unused_scalar_stopping_signal):
    return_value = op_fn()
    execute_ops = return_value['ops']
    signals = return_value['signals']
    with ops.control_dependencies(execute_ops):
      return _StopSignals.as_scalar_stopping_signal(signals)

  # By setting parallel_iterations=1, the parallel execution in while_loop is
  # basically turned off.
  with ops.device(device):
    return control_flow_ops.while_loop(
        cond, computation, [_StopSignals.NON_STOPPING_SIGNAL],
        parallel_iterations=1)
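The pattern above can be hard to read out of context. The following is a minimal sketch of the same idea outside the TPUEstimator internals: loop until a boolean stop signal flips, with logical_not as the while_loop condition. The helper name and the toy stopping rule are illustrative assumptions, not part of the original code:

import tensorflow as tf

def run_until_stopped():
    step = tf.constant(0)
    stop = tf.constant(False)

    def cond(step, stop):
        # Keep iterating while the stop signal has NOT been raised.
        return tf.math.logical_not(stop)

    def body(step, stop):
        step = step + 1
        return [step, step >= 5]   # raise the stop signal after five iterations

    # parallel_iterations=1 mirrors the snippet above: iterations run serially.
    return tf.while_loop(cond, body, [step, stop], parallel_iterations=1)

final_step, _ = run_until_stopped()
print(final_step.numpy())  # -> 5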
Example #2
Source File: tpu_estimator.py From xlnet with Apache License 2.0 | 6 votes |
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def cond(scalar_stopping_signal):
    return math_ops.logical_not(
        _StopSignals.should_stop(scalar_stopping_signal))

  def computation(unused_scalar_stopping_signal):
    return_value = op_fn()
    execute_ops = return_value['ops']
    signals = return_value['signals']
    with ops.control_dependencies(execute_ops):
      return _StopSignals.as_scalar_stopping_signal(signals)

  # By setting parallel_iterations=1, the parallel execution in while_loop is
  # basically turned off.
  with ops.device(device):
    return control_flow_ops.while_loop(
        cond, computation, [_StopSignals.NON_STOPPING_SIGNAL],
        parallel_iterations=1)
Example #3
Source File: tpu_estimator.py From embedding-as-service with MIT License | 6 votes |
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def cond(scalar_stopping_signal):
    return math_ops.logical_not(
        _StopSignals.should_stop(scalar_stopping_signal))

  def computation(unused_scalar_stopping_signal):
    return_value = op_fn()
    execute_ops = return_value['ops']
    signals = return_value['signals']
    with ops.control_dependencies(execute_ops):
      return _StopSignals.as_scalar_stopping_signal(signals)

  # By setting parallel_iterations=1, the parallel execution in while_loop is
  # basically turned off.
  with ops.device(device):
    return control_flow_ops.while_loop(
        cond, computation, [_StopSignals.NON_STOPPING_SIGNAL],
        parallel_iterations=1)
Example #4
Source File: math_grad.py From lambda-packs with MIT License | 6 votes |
def _MaximumMinimumGrad(op, grad, selector_op):
  """Factor out the code for the gradient of Maximum or Minimum."""
  x = op.inputs[0]
  y = op.inputs[1]
  gdtype = grad.dtype
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  gradshape = array_ops.shape(grad)
  zeros = array_ops.zeros(gradshape, gdtype)
  xmask = selector_op(x, y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  xgrad = array_ops.where(xmask, grad, zeros)
  ygrad = array_ops.where(math_ops.logical_not(xmask), grad, zeros)
  gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
  gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
  return (gx, gy)
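For intuition, here is a small check of what the mask / logical_not(mask) split achieves: the incoming gradient of tf.maximum is routed to x wherever the selector (x >= y for Maximum) is True and to y elsewhere. This only demonstrates the behaviour with tf.GradientTape; it is not the internal gradient registration itself, and the values are made up:

import tensorflow as tf

x = tf.constant([1.0, 5.0, 3.0])
y = tf.constant([4.0, 2.0, 3.0])
with tf.GradientTape() as tape:
    tape.watch([x, y])
    z = tf.maximum(x, y)
gx, gy = tape.gradient(z, [x, y])
print(gx.numpy())  # -> [0. 1. 1.]  (x "wins" where x >= y)
print(gy.numpy())  # -> [1. 0. 0.]  (y gets the gradient elsewhere)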
Example #5
Source File: histogram_ops.py From tf-slim with Apache License 2.0 | 6 votes |
def _make_auc_histograms(boolean_labels, scores, score_range, nbins):
  """Create histogram tensors from one batch of labels/scores."""
  with variable_scope.variable_scope(
      None, 'make_auc_histograms', [boolean_labels, scores, nbins]):
    # Histogram of scores for records in this batch with True label.
    hist_true = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, boolean_labels),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_true')
    # Histogram of scores for records in this batch with False label.
    hist_false = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, math_ops.logical_not(boolean_labels)),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_false')
    return hist_true, hist_false
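A minimal sketch of the same split using public TF 2.x ops: scores with a True label fill one fixed-width histogram, and scores selected via logical_not(labels) fill the other. The labels, scores, and bin settings below are made-up values for illustration:

import tensorflow as tf

labels = tf.constant([True, False, True, False])
scores = tf.constant([0.9, 0.15, 0.7, 0.45])

hist_true = tf.histogram_fixed_width(
    tf.boolean_mask(scores, labels), value_range=[0.0, 1.0], nbins=5)
hist_false = tf.histogram_fixed_width(
    tf.boolean_mask(scores, tf.math.logical_not(labels)),
    value_range=[0.0, 1.0], nbins=5)
print(hist_true.numpy(), hist_false.numpy())  # -> [0 0 0 1 1] [1 0 1 0 0]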
Example #6
Source File: histogram_ops.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def _make_auc_histograms(boolean_labels, scores, score_range, nbins):
  """Create histogram tensors from one batch of labels/scores."""
  with variable_scope.variable_scope(
      None, 'make_auc_histograms', [boolean_labels, scores, nbins]):
    # Histogram of scores for records in this batch with True label.
    hist_true = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, boolean_labels),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_true')
    # Histogram of scores for records in this batch with False label.
    hist_false = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, math_ops.logical_not(boolean_labels)),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_false')
    return hist_true, hist_false
Example #7
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def _MaximumMinimumGrad(op, grad, selector_op):
  """Factor out the code for the gradient of Maximum or Minimum."""
  x = op.inputs[0]
  y = op.inputs[1]
  gdtype = grad.dtype
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  gradshape = array_ops.shape(grad)
  zeros = array_ops.zeros(gradshape, gdtype)
  xmask = selector_op(x, y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  xgrad = array_ops.where(xmask, grad, zeros)
  ygrad = array_ops.where(math_ops.logical_not(xmask), grad, zeros)
  gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
  gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
  return (gx, gy)
Example #8
Source File: tpu_estimator.py From transformer-xl with Apache License 2.0 | 6 votes |
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def cond(scalar_stopping_signal):
    return math_ops.logical_not(
        _StopSignals.should_stop(scalar_stopping_signal))

  def computation(unused_scalar_stopping_signal):
    return_value = op_fn()
    execute_ops = return_value['ops']
    signals = return_value['signals']
    with ops.control_dependencies(execute_ops):
      return _StopSignals.as_scalar_stopping_signal(signals)

  # By setting parallel_iterations=1, the parallel execution in while_loop is
  # basically turned off.
  with ops.device(device):
    return control_flow_ops.while_loop(
        cond, computation, [_StopSignals.NON_STOPPING_SIGNAL],
        parallel_iterations=1)
Example #9
Source File: histogram_ops.py From deep_image_model with Apache License 2.0 | 6 votes |
def _make_auc_histograms(boolean_labels, scores, score_range, nbins):
  """Create histogram tensors from one batch of labels/scores."""
  with variable_scope.variable_scope(
      None, 'make_auc_histograms', [boolean_labels, scores, nbins]):
    # Histogram of scores for records in this batch with True label.
    hist_true = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, boolean_labels),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_true')
    # Histogram of scores for records in this batch with False label.
    hist_false = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, math_ops.logical_not(boolean_labels)),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_false')
    return hist_true, hist_false
Example #10
Source File: common_layers.py From TTS with Mozilla Public License 2.0 | 5 votes |
def apply_score_masking(self, score, mask):  # pylint: disable=no-self-use
    """ ignore sequence paddings """
    padding_mask = tf.expand_dims(math_ops.logical_not(mask), 2)
    # Bias so padding positions do not contribute to attention distribution.
    score -= 1.e9 * math_ops.cast(padding_mask, dtype=tf.float32)
    return score
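A rough usage sketch of the masking trick: the boolean mask is True on real tokens, its logical_not marks padding, and a large negative bias removes padded steps from the softmax. The shapes below ([batch, time] mask built with tf.sequence_mask, [batch, time, 1] scores) are assumptions for illustration, not taken from the TTS code:

import tensorflow as tf

lengths = tf.constant([2, 3])
mask = tf.sequence_mask(lengths, maxlen=3)                    # [2, 3], True = real token
score = tf.zeros([2, 3, 1])                                   # dummy alignment scores

padding_mask = tf.expand_dims(tf.math.logical_not(mask), 2)   # [2, 3, 1], True on padding
score -= 1.e9 * tf.cast(padding_mask, tf.float32)             # large negative bias on padding
weights = tf.nn.softmax(score, axis=1)                        # padded steps get ~0 weight
print(weights.numpy()[0, :, 0])                               # -> roughly [0.5 0.5 0. ]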
Example #11
Source File: test_forward.py From incubator-tvm with Apache License 2.0 | 5 votes |
def _test_logical_binary(logical_bin_op, data):
    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype='bool', name='in_0'),
                   array_ops.placeholder(shape=data[1].shape, dtype='bool', name='in_1')]
        if logical_bin_op == math_ops.logical_not:
            out = math_ops.logical_or(in_data[0], in_data[1], name='out1')
            out = logical_bin_op(out, name='out')
        else:
            out = logical_bin_op(in_data[0], in_data[1], name='out')
        compare_tflite_with_tvm(data, ['in_0:0', 'in_1:0'], in_data, [out])
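Note the special case: logical_not is unary while the other ops exercised by this helper are binary, so the test first combines the two placeholders with logical_or and then negates the result. A quick eager-mode sketch of the composed op, with made-up inputs:

import tensorflow as tf

a = tf.constant([[True, False], [False, False]])
b = tf.constant([[False, False], [True, False]])
print(tf.math.logical_not(tf.math.logical_or(a, b)).numpy())
# -> [[False  True]
#     [False  True]]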