Python tensorflow.python.ops.gradient_checker.compute_gradient() Examples
The following are 5 code examples of tensorflow.python.ops.gradient_checker.compute_gradient(), drawn from open-source projects.
The originating project and source file are noted above each example.
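Before the per-project examples, here is a minimal, self-contained sketch of typical graph-mode usage. The tf.tanh op, the shapes, and the tolerances are illustrative assumptions, not taken from any project below. compute_gradient returns the analytically derived (backprop) Jacobian and a finite-difference estimate of the same Jacobian of y with respect to x, so a test can assert that the two agree.

import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import gradient_checker

tf.disable_eager_execution()

x_shape = [2, 3]
with tf.Session():
    x = tf.placeholder(tf.float32, shape=x_shape, name='x')
    y = tf.tanh(x)  # hypothetical op under test, chosen only for illustration
    x_init = np.random.randn(*x_shape).astype(np.float32)
    # jacob_t: analytical (backprop) Jacobian; jacob_n: finite-difference estimate.
    jacob_t, jacob_n = gradient_checker.compute_gradient(
        x, x_shape, y, x_shape, x_init_value=x_init)
    np.testing.assert_allclose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)

Note that compute_gradient needs an active default session (the examples below get one from self.test_session()), and when the first argument is a list of tensors it returns one (analytical, numerical) pair per input.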
Example #1
Source File: backward_warp.py From UnFlow with MIT License
def _warp_test(self, first, second, flow, debug=False):
    with self.test_session(use_gpu=True) as sess:
        num_batch, height, width, channels = second.shape
        #second_ = tf.placeholder(tf.float32, shape=second.shape, name='im')
        flow_ = tf.placeholder(tf.float32, shape=flow.shape, name='flow')
        inv_warped_second = ops.backward_warp(second, flow_)
        pred = sess.run(inv_warped_second, feed_dict={flow_: flow})

        if debug:
            print('-- result channels')
            for c in range(channels):
                print(np.reshape(pred[0, :, :, c], [height, width]))
        self.assertAllClose(first, pred)

        jacob_t, jacob_n = gradient_checker.compute_gradient(
            flow_, flow.shape, inv_warped_second, pred.shape)
        self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
Example #2
Source File: correlation.py From UnFlow with MIT License
def _test_correlation(self, in0, in1, out=None, **kwargs):
    with self.test_session(use_gpu=True) as sess:
        in0_op = tf.constant(in0, tf.float32)
        in1_op = tf.constant(in1, tf.float32)
        result_op = ops.correlation(in0_op, in1_op, **kwargs)
        result = sess.run(result_op)

        if out is not None:
            self.assertAllClose(out, result)

        jacob_t, jacob_n = gradient_checker.compute_gradient(
            [in0_op, in1_op], [in0.shape, in1.shape], result_op, result.shape)
        #print("--------------- n")
        #print(jacob_n)
        #print("--------------- t")
        #print(jacob_t)
        self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
Example #3
Source File: test_gradients.py From crfasrnn_keras with MIT License
def test_high_dim_filter_grad(self):
    x_shape = [5, 10, 10]

    # Test inputs: unaries and RGB values
    unary_np = np.random.randn(*x_shape).astype(np.float32)
    rgb_np = np.random.randint(low=0, high=256, size=x_shape).astype(np.float32)

    with self.test_session():
        unary_tf = constant_op.constant(unary_np)
        rgb_tf = constant_op.constant(rgb_np)
        y_tf = custom_module.high_dim_filter(unary_tf, rgb_tf, bilateral=True,
                                             theta_alpha=1000., theta_beta=1000.,
                                             theta_gamma=1000.)
        out = gradient_checker.compute_gradient([unary_tf, rgb_tf],
                                                [x_shape, x_shape],
                                                y_tf, x_shape)

        # We only need to compare gradients w.r.t. unaries
        computed = out[0][0].flatten()
        estimated = out[0][1].flatten()

        mask = (computed != 0)
        computed = computed[mask]
        estimated = estimated[mask]

        difference = computed - estimated

        measure1 = np.mean(difference) / np.mean(computed)
        measure2 = np.max(difference) / np.max(computed)

        print('Gradient check: measure1 = {:.6f}, measure2 = {:.6f}'.format(measure1, measure2))
        self.assertLess(measure1, 1e-3, 'Errors found in the gradient computation.')
        self.assertLess(measure2, 2e-2, 'Errors found in the gradient computation.')
        print('Gradient check: success!')
Example #4
Source File: forward_warp.py From UnFlow with MIT License
def test_grad(self):
    with self.test_session(use_gpu=True) as sess:
        flow_shape = [1, 10, 10, 2]
        warped_shape = [1, 10, 10, 1]
        flow_ = tf.placeholder(tf.float32, shape=flow_shape, name='flow')
        warped_ = ops.forward_warp(flow_)

        jacob_t, jacob_n = gradient_checker.compute_gradient(
            flow_, flow_shape, warped_, warped_shape)
        self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
Example #5
Source File: test_patch_bias_add.py From framework-determinism with Apache License 2.0
def _computeGradient(self, np_input, bias, dtype, data_format):
    input_shape = output_shape = np_input.shape
    bias_shape = bias.shape
    input_tensor = constant_op.constant(
        np_input, shape=input_shape, dtype=dtype)
    bias_tensor = constant_op.constant(bias, shape=bias_shape, dtype=dtype)

    if context.executing_eagerly():

        def bias_add(input_tensor, bias_tensor):
            return nn_ops.bias_add(
                input_tensor, bias_tensor, data_format=data_format)

        # The following is a work-around for TF issue 33660. Instead of
        # calculating the analytical and numerical gradients for both
        # inputs in a single call to compute_gradient, compute_gradient
        # is called for each input separately.
        def bias_add_1(input_tensor):
            return bias_add(input_tensor, bias_tensor)

        def bias_add_2(bias_tensor):
            return bias_add(input_tensor, bias_tensor)

        input_jacob_a, input_jacob_n = gradient_checker_v2.compute_gradient(
            bias_add_1, [input_tensor])
        bias_jacob_a, bias_jacob_n = gradient_checker_v2.compute_gradient(
            bias_add_2, [bias_tensor])

        # Test gradient of BiasAddGrad
        def bias_add_grad_function(upstream_gradients):
            with backprop.GradientTape() as tape:
                tape.watch(bias_tensor)
                bias_add_output = bias_add(input_tensor, bias_tensor)
                gradient_injector_output = bias_add_output * upstream_gradients
                return tape.gradient(gradient_injector_output, bias_tensor)

        upstream_tensor = self._random_tensor(output_shape, dtype)
        grad_jacob_a, grad_jacob_n = gradient_checker_v2.compute_gradient(
            bias_add_grad_function, [upstream_tensor])
    else:
        output_tensor = nn_ops.bias_add(
            input_tensor, bias_tensor, data_format=data_format)
        jacobians = gradient_checker.compute_gradient(
            [input_tensor, bias_tensor], [input_shape, bias_shape],
            output_tensor, output_shape)
        (input_jacob_a, input_jacob_n), (bias_jacob_a, bias_jacob_n) = jacobians

        # Test gradient of BiasAddGrad
        bias_add_grad = gradients_impl.gradients(
            nn_ops.l2_loss(output_tensor), bias_tensor)[0]
        grad_jacob_a, grad_jacob_n = gradient_checker.compute_gradient(
            output_tensor, output_shape, bias_add_grad, bias_shape)

    return ((input_jacob_a, bias_jacob_a, grad_jacob_a),
            (input_jacob_n, bias_jacob_n, grad_jacob_n))
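Example #5 mixes the two checker APIs: in eager mode, gradient_checker_v2.compute_gradient takes a callable plus a list of input tensors and returns a (theoretical, numerical) pair of Jacobian tuples with one entry per input, while the graph-mode gradient_checker.compute_gradient used in the other examples takes tensors and shapes. A minimal, hedged sketch of the v2 call follows; tf.nn.softplus is only an illustrative choice, and in public TF 2.x the same checker is exposed as tf.test.compute_gradient.

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gradient_checker_v2

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])

def f(x):
    return tf.nn.softplus(x)  # hypothetical op under test

# Each returned value holds one Jacobian per input tensor in the list.
theoretical, numerical = gradient_checker_v2.compute_gradient(f, [x])
np.testing.assert_allclose(theoretical[0], numerical[0], rtol=1e-3, atol=1e-3)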