Python tensorflow.python.ops.nn_ops.l2_loss() Examples
The following are 10 code examples of tensorflow.python.ops.nn_ops.l2_loss(), collected from open-source projects. The source file, originating project, and license are noted above each example.
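nn_ops.l2_loss is the internal op behind the public tf.nn.l2_loss; both compute sum(t ** 2) / 2 and return a 0-D scalar. A minimal usage sketch, assuming TensorFlow 2.x with eager execution and using the public alias for convenience:

import numpy as np
import tensorflow as tf

# tf.nn.l2_loss is the public alias of tensorflow.python.ops.nn_ops.l2_loss.
t = tf.constant([[1.0, 2.0], [3.0, 4.0]])
loss = tf.nn.l2_loss(t)  # scalar: sum(t ** 2) / 2
expected = np.sum(np.square([[1.0, 2.0], [3.0, 4.0]])) / 2.0  # = 15.0
assert np.isclose(loss.numpy(), expected)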
Example #1
Source File: nn_test.py From auto-alt-text-lambda-api with MIT License
def test_unary_ops(self):
  ops = [
      ('relu', nn_ops.relu, nn.relu),
      ('relu6', nn_ops.relu6, nn.relu6),
      ('crelu', nn_ops.crelu, nn.crelu),
      ('elu', nn_ops.elu, nn.elu),
      ('softplus', nn_ops.softplus, nn.softplus),
      ('l2_loss', nn_ops.l2_loss, nn.l2_loss),
      ('softmax', nn_ops.softmax, nn.softmax),
      ('log_softmax', nn_ops.log_softmax, nn.log_softmax),
  ]
  for op_name, tf_op, lt_op in ops:
    golden_tensor = tf_op(self.original_lt.tensor)
    golden_lt = core.LabeledTensor(golden_tensor, self.axes)
    actual_lt = lt_op(self.original_lt)
    self.assertIn(op_name, actual_lt.name)
    self.assertLabeledTensorsEqual(golden_lt, actual_lt)
Example #2
Source File: nn_test.py From keras-lambda with MIT License
def test_unary_ops(self):
  ops = [
      ('relu', nn_ops.relu, nn.relu),
      ('relu6', nn_ops.relu6, nn.relu6),
      ('crelu', nn_ops.crelu, nn.crelu),
      ('elu', nn_ops.elu, nn.elu),
      ('softplus', nn_ops.softplus, nn.softplus),
      ('l2_loss', nn_ops.l2_loss, nn.l2_loss),
      ('softmax', nn_ops.softmax, nn.softmax),
      ('log_softmax', nn_ops.log_softmax, nn.log_softmax),
  ]
  for op_name, tf_op, lt_op in ops:
    golden_tensor = tf_op(self.original_lt.tensor)
    golden_lt = core.LabeledTensor(golden_tensor, self.axes)
    actual_lt = lt_op(self.original_lt)
    self.assertIn(op_name, actual_lt.name)
    self.assertLabeledTensorsEqual(golden_lt, actual_lt)
Example #3
Source File: util.py From lambda-packs with MIT License
def l2norm_squared(v):
  return constant_op.constant(2, dtype=v.dtype.base_dtype) * nn_ops.l2_loss(v)
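The helper above relies on the identity 2 * l2_loss(v) = sum(v ** 2), i.e. the squared L2 norm of v. A short sketch of that identity, assuming TensorFlow 2.x with eager execution and using the public tf.nn.l2_loss alias:

import numpy as np
import tensorflow as tf

# 2 * l2_loss(v) == sum(v ** 2) == ||v||_2 ** 2, which is what l2norm_squared returns.
v = tf.constant([3.0, 4.0])
squared_norm = 2.0 * tf.nn.l2_loss(v)
assert np.isclose(squared_norm.numpy(), np.linalg.norm([3.0, 4.0]) ** 2)  # 25.0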
Example #4
Source File: util.py From auto-alt-text-lambda-api with MIT License
def l2norm_squared(v):
  return constant_op.constant(2, dtype=v.dtype.base_dtype) * nn_ops.l2_loss(v)
Example #5
Source File: variables_test.py From tf-slim with Apache License 2.0
def testVariableWithRegularizer(self):
  with self.cached_session():
    with variable_scope.variable_scope('A'):
      a = variables_lib2.variable('a', [], regularizer=nn_ops.l2_loss)
      loss = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertDeviceEqual(loss.device, a.device)
Example #6
Source File: variables_test.py From tf-slim with Apache License 2.0
def testVariableWithRegularizerColocate(self):
  with self.cached_session():
    with variable_scope.variable_scope('A'):
      a = variables_lib2.variable(
          'a', [], device='gpu:0', regularizer=nn_ops.l2_loss)
      loss = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertDeviceEqual(loss.device, a.device)
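The two tests above pass nn_ops.l2_loss directly as a regularizer: a regularizer is just a callable that maps a weight tensor to a scalar loss. A hedged sketch of the same idea with the modern public API, assuming TensorFlow 2.x with tf.keras (the Dense layer and input shape are illustrative choices, not from the original tests):

import tensorflow as tf

# Any callable mapping a weight tensor to a scalar can serve as a regularizer,
# so tf.nn.l2_loss can be passed just like nn_ops.l2_loss is above.
layer = tf.keras.layers.Dense(4, kernel_regularizer=tf.nn.l2_loss)
_ = layer(tf.zeros([1, 3]))  # build/call the layer so the kernel exists
print(layer.losses)          # [scalar tensor: sum(kernel ** 2) / 2]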
Example #7
Source File: layers_test.py From tf-slim with Apache License 2.0
def testCreateConvWithWD(self):
  height, width = 7, 9
  weight_decay = 0.01
  with self.cached_session() as sess:
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    regularizer = regularizers.l2_regularizer(weight_decay)
    layers_lib.convolution2d(
        images, 32, [3, 3], weights_regularizer=regularizer)
    l2_loss = nn_ops.l2_loss(variables.get_variables_by_name('weights')[0])
    wd = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
    self.assertEqual(wd.op.name, 'Conv/kernel/Regularizer/l2_regularizer')
    sess.run(variables_lib.global_variables_initializer())
    self.assertAlmostEqual(sess.run(wd), weight_decay * l2_loss.eval())
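The final assertion above checks that the collected regularization loss equals weight_decay * l2_loss(weights), since slim's l2_regularizer(weight_decay) yields exactly that term. A small numeric sketch of the relationship, assuming TensorFlow 2.x with eager execution (the random weights are illustrative only):

import numpy as np
import tensorflow as tf

# l2_regularizer(weight_decay) applied to w produces
# weight_decay * l2_loss(w) = weight_decay * sum(w ** 2) / 2.
weight_decay = 0.01
w = np.random.RandomState(0).randn(3, 3).astype(np.float32)
wd_term = weight_decay * tf.nn.l2_loss(w)
assert np.isclose(wd_term.numpy(), weight_decay * 0.5 * np.sum(w ** 2))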
Example #8
Source File: util.py From keras-lambda with MIT License
def l2norm_squared(v):
  return constant_op.constant(2, dtype=v.dtype.base_dtype) * nn_ops.l2_loss(v)
Example #9
Source File: test_patch_bias_add.py From framework-determinism with Apache License 2.0
def _computeGradient(self, np_input, bias, dtype, data_format):
  input_shape = output_shape = np_input.shape
  bias_shape = bias.shape
  input_tensor = constant_op.constant(
      np_input, shape=input_shape, dtype=dtype)
  bias_tensor = constant_op.constant(bias, shape=bias_shape, dtype=dtype)

  if context.executing_eagerly():

    def bias_add(input_tensor, bias_tensor):
      return nn_ops.bias_add(
          input_tensor, bias_tensor, data_format=data_format)

    # The following is a work-around for TF issue 33660. Instead of
    # calculating the analytical and numerical gradients for both
    # inputs in a single call to compute_gradient, compute_gradient
    # is called for each input separately.
    def bias_add_1(input_tensor):
      return bias_add(input_tensor, bias_tensor)

    def bias_add_2(bias_tensor):
      return bias_add(input_tensor, bias_tensor)

    input_jacob_a, input_jacob_n = gradient_checker_v2.compute_gradient(
        bias_add_1, [input_tensor])
    bias_jacob_a, bias_jacob_n = gradient_checker_v2.compute_gradient(
        bias_add_2, [bias_tensor])

    # Test gradient of BiasAddGrad
    def bias_add_grad_function(upstream_gradients):
      with backprop.GradientTape() as tape:
        tape.watch(bias_tensor)
        bias_add_output = bias_add(input_tensor, bias_tensor)
        gradient_injector_output = bias_add_output * upstream_gradients
        return tape.gradient(gradient_injector_output, bias_tensor)

    upstream_tensor = self._random_tensor(output_shape, dtype)
    grad_jacob_a, grad_jacob_n = gradient_checker_v2.compute_gradient(
        bias_add_grad_function, [upstream_tensor])
  else:
    output_tensor = nn_ops.bias_add(
        input_tensor, bias_tensor, data_format=data_format)
    jacobians = gradient_checker.compute_gradient(
        [input_tensor, bias_tensor], [input_shape, bias_shape],
        output_tensor, output_shape)
    (input_jacob_a, input_jacob_n), (bias_jacob_a, bias_jacob_n) = jacobians

    # Test gradient of BiasAddGrad
    bias_add_grad = gradients_impl.gradients(
        nn_ops.l2_loss(output_tensor), bias_tensor)[0]
    grad_jacob_a, grad_jacob_n = gradient_checker.compute_gradient(
        output_tensor, output_shape, bias_add_grad, bias_shape)

  return ((input_jacob_a, bias_jacob_a, grad_jacob_a),
          (input_jacob_n, bias_jacob_n, grad_jacob_n))
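The graph-mode branch above differentiates nn_ops.l2_loss(output_tensor) with respect to the bias. l2_loss is a convenient scalar readout for such gradient tests because d/dx [sum(x ** 2) / 2] = x, so the upstream gradient flowing back from the loss equals the op's output itself. A short sketch of that property, assuming TensorFlow 2.x with eager execution:

import numpy as np
import tensorflow as tf

# The gradient of l2_loss(x) with respect to x is x.
x = tf.constant([[1.0, -2.0], [0.5, 3.0]])
with tf.GradientTape() as tape:
  tape.watch(x)
  loss = tf.nn.l2_loss(x)
grad = tape.gradient(loss, x)
assert np.allclose(grad.numpy(), x.numpy())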
Example #10
Source File: clip_ops.py From deep_image_model with Apache License 2.0
def global_norm(t_list, name=None):
  """Computes the global norm of multiple tensors.

  Given a tuple or list of tensors `t_list`, this operation returns the
  global norm of the elements in all tensors in `t_list`. The global norm is
  computed as:

  `global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`

  Any entries in `t_list` that are of type None are ignored.

  Args:
    t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
    name: A name for the operation (optional).

  Returns:
    A 0-D (scalar) `Tensor` of type `float`.

  Raises:
    TypeError: If `t_list` is not a sequence.
  """
  if (not isinstance(t_list, collections.Sequence) or
      isinstance(t_list, six.string_types)):
    raise TypeError("t_list should be a sequence")
  t_list = list(t_list)
  with ops.name_scope(name, "global_norm", t_list) as name:
    values = [
        ops.convert_to_tensor(
            t.values if isinstance(t, ops.IndexedSlices) else t,
            name="t_%d" % i)
        if t is not None else t
        for i, t in enumerate(t_list)]
    half_squared_norms = []
    for v in values:
      if v is not None:
        with ops.colocate_with(v):
          half_squared_norms.append(nn_ops.l2_loss(v))

    half_squared_norm = math_ops.reduce_sum(array_ops.pack(half_squared_norms))

    norm = math_ops.sqrt(
        half_squared_norm *
        constant_op.constant(2.0, dtype=half_squared_norm.dtype),
        name="global_norm")

  return norm
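The implementation above sums 2 * l2_loss(t) over all tensors (each term is a squared L2 norm) and takes the square root, matching the formula in the docstring. A short verification sketch, assuming TensorFlow 2.x with eager execution and using the public tf.linalg.global_norm as the reference:

import numpy as np
import tensorflow as tf

# global_norm = sqrt(sum over tensors of 2 * l2_loss(t)),
# i.e. the L2 norm of all elements of all tensors taken together.
t_list = [tf.constant([3.0, 4.0]), tf.constant([[1.0], [2.0]])]
manual = tf.sqrt(sum(2.0 * tf.nn.l2_loss(t) for t in t_list))
assert np.isclose(manual.numpy(), tf.linalg.global_norm(t_list).numpy())
assert np.isclose(manual.numpy(), np.sqrt(9.0 + 16.0 + 1.0 + 4.0))  # sqrt(30)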