Python tensorflow.compat.v2.reduce_sum() Examples

The following are 30 code examples of tensorflow.compat.v2.reduce_sum(), drawn from open-source projects. Each example lists its source file, the project it comes from, and its license. You may also want to check out all available functions and classes of the module tensorflow.compat.v2.
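Before the project examples, a minimal standalone sketch of the call itself (results in the comments assume eager execution; the tensor values are made up for illustration):

import tensorflow.compat.v2 as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])
tf.reduce_sum(x)                          # 21.0 -- sum over every element
tf.reduce_sum(x, axis=0)                  # [5., 7., 9.] -- sum down the columns
tf.reduce_sum(x, axis=1, keepdims=True)   # [[6.], [15.]] -- keep the reduced axis
tf.reduce_sum(input_tensor=x, axis=1)     # [6., 15.] -- keyword form used in many examples below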
Example #1
Source File: util.py    From language with Apache License 2.0
def labels_of_top_ranked_predictions_in_batch(labels, predictions):
  """Applying tf.metrics.mean to this gives precision at 1.

  Args:
    labels: minibatch of dense 0/1 labels, of shape [batch_size, num_classes]
    predictions: minibatch of predictions of the same shape

  Returns:
    one-dimensional tensor top_labels, where top_labels[i] = 1.0 iff the
    top-scoring prediction for batch element i has label 1.0
  """
  indices_of_top_preds = tf.cast(tf.argmax(input=predictions, axis=1), tf.int32)
  batch_size = tf.reduce_sum(input_tensor=tf.ones_like(indices_of_top_preds))
  row_indices = tf.range(batch_size)
  thresholded_labels = tf.where(labels > 0.0, tf.ones_like(labels),
                                tf.zeros_like(labels))
  label_indices_to_gather = tf.transpose(
      a=tf.stack([row_indices, indices_of_top_preds]))
  return tf.gather_nd(thresholded_labels, label_indices_to_gather) 
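For illustration, a hypothetical toy batch (not from the original file) showing how the helper behaves:

labels = tf.constant([[0., 1.], [1., 0.]])
predictions = tf.constant([[0.2, 0.8], [0.3, 0.7]])
top_labels = labels_of_top_ranked_predictions_in_batch(labels, predictions)
# top_labels == [1.0, 0.0]: row 0's top prediction (class 1) is labeled 1,
# row 1's top prediction (class 1) is labeled 0.
tf.reduce_mean(top_labels)  # 0.5 == precision at 1 for this batch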
Example #2
Source File: __init__.py    From language with Apache License 2.0
def weighted_by_sum(
      self, other):
    """Weight elements in some set by the sum of the scores in some other set.

    Args:
      other: A NeuralQueryExpression

    Returns:
      The NeuralQueryExpression that evaluates to the reweighted version of
      the set obtained by evaluating 'self'.
    """
    provenance = NQExprProvenance(
        operation='weighted_by_sum',
        inner=self.provenance,
        other=other.provenance)
    with tf.name_scope('weighted_by_sum'):
      return self.context.as_nql(
          self.tf * tf.reduce_sum(input_tensor=other.tf, axis=1, keepdims=True),
          self._type_name, provenance) 
Example #3
Source File: halton_test.py    From tf-quant-finance with Apache License 2.0
def test_randomized_qmc_basic(self):
    """Tests the randomization of the random.halton sequences."""
    # This test is identical to the example given in Owen (2017), Figure 5.
    dim = 20
    num_results = 2000
    replica = 5
    seed = 121117

    values = []
    for i in range(replica):
      sample, _ = random.halton.sample(dim, num_results=num_results,
                                       seed=seed + i)
      f = tf.reduce_mean(
          input_tensor=tf.reduce_sum(input_tensor=sample, axis=1)**2)
      values.append(self.evaluate(f))
    self.assertAllClose(np.mean(values), 101.6667, atol=np.std(values) * 2) 
Example #4
Source File: __init__.py    From language with Apache License 2.0
def nonneg_crossentropy(expr, target):
  """A cross entropy operator that is appropriate for NQL outputs.

  Query expressions often evaluate to sparse vectors.  This evaluates cross
  entropy safely.

  Args:
    expr: a TensorFlow expression for some predicted values.
    target: a TensorFlow expression for target values.

  Returns:
    A TensorFlow expression for the cross entropy.
  """
  expr_replacing_0_with_1 = tf.where(
      expr > 0, expr, tf.ones(tf.shape(input=expr), tf.float32))
  cross_entropies = tf.reduce_sum(
      input_tensor=-target * tf.math.log(expr_replacing_0_with_1), axis=1)
  return tf.reduce_mean(input_tensor=cross_entropies, axis=0) 
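A hypothetical call (not part of the original file) showing how the zero entries are handled:

expr = tf.constant([[0.5, 0.0, 0.5]])    # a sparse predicted distribution
target = tf.constant([[1.0, 0.0, 0.0]])
loss = nonneg_crossentropy(expr, target)
# The zero entry is replaced by 1 before the log, so log(0) never appears;
# here loss == -log(0.5) ~= 0.693.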
Example #5
Source File: nql_test.py    From language with Apache License 2.0
def test_gradients(self):
    with tf.GradientTape(persistent=True) as g:
      x = self.context.one(cell(2, 2), 'place_t')
      near_x = x.follow('n') + x.follow('s') + x.follow('e') + x.follow('w')
      lr_near_x = near_x.weighted_by('trained_distance_to', 'ul')
      g.watch(self.context.get_underlying_parameter('trained_distance_to'))
      expected_y = self.context.one(cell(
          1, 2), 'place_t') * 3 + self.context.one(
              cell(2, 1), 'place_t') * 3 + self.context.one(
                  cell(3, 2), 'place_t') * 5 + self.context.one(
                      cell(2, 3), 'place_t') * 5
      almost_y = self.context.one(cell(1, 2), 'place_t') * 2 + self.context.one(
          cell(2, 1), 'place_t') * 3 + self.context.one(
              cell(3, 2), 'place_t') * 4 + self.context.one(
                  cell(2, 3), 'place_t') * 5
      # compute some gradients
      loss_1 = tf.reduce_sum(
          input_tensor=tf.multiply(lr_near_x.tf - expected_y.tf, lr_near_x.tf -
                                   expected_y.tf))
      loss_2 = tf.reduce_sum(
          input_tensor=tf.multiply(lr_near_x.tf - almost_y.tf, lr_near_x.tf -
                                   almost_y.tf))

      grad_1 = g.gradient(
          target=loss_1,
          sources=self.context.get_underlying_parameter('trained_distance_to'))
      grad_2 = g.gradient(
          target=loss_2,
          sources=self.context.get_underlying_parameter('trained_distance_to'))
    self.assertEqual(loss_1.numpy(), 0.0)
    self.assertEqual(loss_2.numpy(), 2.0)
    sum_grad_1 = tf.reduce_sum(input_tensor=grad_1)
    sum_grad_2 = tf.reduce_sum(input_tensor=grad_2)
    self.assertEqual(sum_grad_1.numpy(), 0.0)
    self.assertEqual(sum_grad_2.numpy(), 4.0) 
Example #6
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_float32(self):
    minimum = np.array([1.0, 1.0], dtype=np.float32)
    scales = np.array([2.0, 3.0], dtype=np.float32)
    start = np.zeros_like(minimum)

    @tff.math.make_val_and_grad_fn
    def quadratic(x):
      return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)

    result = tff.math.optimizer.conjugate_gradient_minimize(
        quadratic, initial_position=start)
    self.assertEqual(result.position.dtype, tf.float32)
    self.assertArrayNear(self.evaluate(result.position), minimum, 1e-5) 
Example #7
Source File: conjugate_gradient.py    From tf-quant-finance with Apache License 2.0
def _dot(x, y):
  """Evaluates scalar product."""
  return tf.math.reduce_sum(x * y, axis=-1) 
Example #8
Source File: conjugate_gradient.py    From tf-quant-finance with Apache License 2.0
def _norm_sq(x):
  """Evaluates L2 norm squared."""
  return tf.math.reduce_sum(tf.square(x), axis=-1) 
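A hypothetical usage of the two helpers above; because they reduce over the last axis, they work on batches of vectors unchanged (values are illustrative):

v = tf.constant([[1., 2., 2.],
                 [3., 0., 4.]])
w = tf.ones_like(v)
_dot(v, w)      # [5., 7.]  -- per-row dot products
_norm_sq(v)     # [9., 25.] -- per-row squared L2 norms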
Example #9
Source File: linear_interpolation_test.py    From tf-quant-finance with Apache License 2.0
def test_valid_gradients(self, optimize_for_tpu):
    """Tests none of the gradients is nan."""

    # In this example, `x[0]` and `x[1]` are both less than or equal to
    # `x_data[0]`. `x[-2]` and `x[-1]` are both greater than or equal to
    # `x_data[-1]`. They are set up this way to test that none of the tf.where
    # branches of the implementation produce any nan. An unselected nan could
    # still propagate through the gradient calculation, with the end result
    # being nan.
    x = [[-10.0, -1.0, 1.0, 3.0, 6.0, 7.0], [8.0, 15.0, 18.0, 25.0, 30.0, 35.0]]
    x_data = [[-1.0, 2.0, 6.0], [8.0, 18.0, 30.0]]

    def _value_helper_fn(y_data):
      """A helper function that returns sum of squared interplated values."""

      interpolated_values = tff.math.interpolation.linear.interpolate(
          x, x_data, y_data,
          optimize_for_tpu=optimize_for_tpu,
          dtype=tf.float64)
      return tf.reduce_sum(tf.math.square(interpolated_values))

    y_data = tf.convert_to_tensor([[10.0, -1.0, -5.0], [7.0, 9.0, 20.0]],
                                  dtype=tf.float64)
    if tf.executing_eagerly():
      with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(y_data)
        value = _value_helper_fn(y_data=y_data)
        gradients = tape.gradient(value, y_data)
    else:
      value = _value_helper_fn(y_data=y_data)
      gradients = tf.gradients(value, y_data)[0]

    gradients = tf.convert_to_tensor(gradients)

    self.assertFalse(self.evaluate(tf.reduce_any(tf.math.is_nan(gradients)))) 
Example #10
Source File: nql_test.py    From language with Apache License 2.0
def test_grid_colors(self):
    black_color = self.context.one('black', 'color_t')
    black_cells = black_color.follow('color', -1)
    all_cells = self.context.all('place_t')
    num_cells = tf.reduce_sum(input_tensor=all_cells.tf)
    num_black_cells = tf.reduce_sum(input_tensor=black_cells.tf)
    self.assertEqual(num_cells.numpy(), 17.0)
    self.assertEqual(num_black_cells.numpy(), 8.0) 
Example #11
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_multiple_functions(self):
    # Define 3 independent quadratic functions, each with its own minimum.
    minima = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    func = lambda x: tf.reduce_sum(tf.square(x - minima), axis=1)
    self._check_algorithm(
        func=func, start_point=np.zeros_like(minima), expected_argmin=minima) 
Example #12
Source File: multi_objective_scalarizer.py    From agents with Apache License 2.0
def call(self, multi_objectives: tf.Tensor) -> tf.Tensor:
    return tf.reduce_sum(multi_objectives * self._weights, axis=1) 
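The method above relies on self._weights set elsewhere in the class; a standalone sketch of the same weighted-sum scalarization with made-up weights:

multi_objectives = tf.constant([[1.0, 2.0, 3.0],
                                [4.0, 5.0, 6.0]])   # [batch_size, num_objectives]
weights = tf.constant([0.5, 0.3, 0.2])
scalarized = tf.reduce_sum(multi_objectives * weights, axis=1)
# scalarized == [1.7, 4.7]: one scalar reward per batch element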
Example #13
Source File: gumbel_softmax.py    From agents with Apache License 2.0
def _log_prob(self, x):
    if x.dtype != self.distribution.logits.dtype:
      # Calculate log_prob using the underlying categorical distribution when
      # the input is discrete.
      x = tf.cast(x, self.distribution.logits.dtype)
      return tf.reduce_sum(
          x * tf.math.log_softmax(self.distribution.logits), axis=-1)
    # Add an epsilon to prevent INF.
    x += 1e-10
    return super(GumbelSoftmax, self)._log_prob(x) 
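A standalone sketch (values are illustrative, not from the original class) of why the reduce_sum above recovers the categorical log-probability when x is one-hot:

logits = tf.constant([2.0, 1.0, 0.1])
x = tf.constant([0.0, 1.0, 0.0])            # one-hot sample of class 1
log_probs = tf.math.log_softmax(logits)     # approx. [-0.417, -1.417, -2.317]
tf.reduce_sum(x * log_probs, axis=-1)       # approx. -1.417: the log-prob of class 1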
Example #14
Source File: continuous_batched.py    From compression with Apache License 2.0
def bits(self, bottleneck, training=True):
    """Estimates the number of bits needed to compress a tensor.

    Arguments:
      bottleneck: `tf.Tensor` containing the data to be compressed. Must have at
        least `self.coding_rank` dimensions, and the innermost dimensions must
        be broadcastable to `self.prior_shape`.
      training: Boolean. If `False`, computes the Shannon information of
        `bottleneck` under the distribution `self.prior`, which is a
        non-differentiable, tight *lower* bound on the number of bits needed to
        compress `bottleneck` using `compress()`. If `True`, returns a somewhat
        looser, but differentiable *upper* bound on this quantity.

    Returns:
      A `tf.Tensor` having the same shape as `bottleneck` without the
      `self.coding_rank` innermost dimensions, containing the number of bits.
    """
    if training:
      quantized = bottleneck + tf.random.uniform(
          tf.shape(bottleneck), minval=-.5, maxval=.5, dtype=bottleneck.dtype)
    else:
      quantized = self.quantize(bottleneck)
    probs = self.prior.prob(quantized)
    probs = math_ops.lower_bound(probs, self.likelihood_bound)
    axes = tuple(range(-self.coding_rank, 0))
    bits = tf.reduce_sum(tf.math.log(probs), axis=axes) / -tf.math.log(2.)
    return bits 
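The last two lines convert summed natural-log likelihoods into bits; a small standalone check of that identity with made-up probabilities:

probs = tf.constant([0.5, 0.25])                              # per-element likelihoods
bits = tf.reduce_sum(tf.math.log(probs)) / -tf.math.log(2.)
# bits == 3.0, the same as -log2(0.5) + -log2(0.25) = 1 + 2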
Example #15
Source File: continuous_indexed.py    From compression with Apache License 2.0
def bits(self, bottleneck, indexes, training=True):
    """Estimates the number of bits needed to compress a tensor.

    Arguments:
      bottleneck: `tf.Tensor` containing the data to be compressed.
      indexes: `tf.Tensor` specifying the scalar distribution for each element
        in `bottleneck`. See class docstring for examples.
      training: Boolean. If `False`, computes the Shannon information of
        `bottleneck` under the distribution computed by `self.prior_fn`,
        which is a non-differentiable, tight *lower* bound on the number of bits
        needed to compress `bottleneck` using `compress()`. If `True`, returns a
        somewhat looser, but differentiable *upper* bound on this quantity.

    Returns:
      A `tf.Tensor` having the same shape as `bottleneck` without the
      `self.coding_rank` innermost dimensions, containing the number of bits.
    """
    indexes = self._normalize_indexes(indexes)
    prior = self._make_prior(indexes)
    if training:
      quantized = bottleneck + tf.random.uniform(
          tf.shape(bottleneck), minval=-.5, maxval=.5, dtype=bottleneck.dtype)
    else:
      offset = helpers.quantization_offset(prior)
      quantized = self._quantize(bottleneck, offset)
    probs = prior.prob(quantized)
    probs = math_ops.lower_bound(probs, self.likelihood_bound)
    axes = tuple(range(-self.coding_rank, 0))
    bits = tf.reduce_sum(tf.math.log(probs), axis=axes) / -tf.math.log(2.)
    return bits 
Example #16
Source File: exporter_lib_tf2_test.py    From models with Apache License 2.0
def postprocess(self, prediction_dict, true_image_shapes):
    predict_tensor_sum = tf.reduce_sum(prediction_dict['image'])
    with tf.control_dependencies(list(prediction_dict.values())):
      postprocessed_tensors = {
          'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5],
                                           [0.5, 0.5, 0.8, 0.8]],
                                          [[0.5, 0.5, 1.0, 1.0],
                                           [0.0, 0.0, 0.0, 0.0]]], tf.float32),
          'detection_scores': predict_tensor_sum + tf.constant(
              [[0.7, 0.6], [0.9, 0.0]], tf.float32),
          'detection_classes': tf.constant([[0, 1],
                                            [1, 0]], tf.float32),
          'num_detections': tf.constant([2, 1], tf.float32),
      }
    return postprocessed_tensors 
Example #17
Source File: ncf_keras_main.py    From models with Apache License 2.0
def call(self, inputs, training=False):
    logits, dup_mask = inputs

    if training:
      hr_sum = 0.0
      hr_count = 0.0
    else:
      metric, metric_weights = metric_fn(logits, dup_mask, self.match_mlperf)
      hr_sum = tf.reduce_sum(metric * metric_weights)
      hr_count = tf.reduce_sum(metric_weights)

    self.add_metric(hr_sum, name="hr_sum", aggregation="mean")
    self.add_metric(hr_count, name="hr_count", aggregation="mean")
    return logits 
Example #18
Source File: array_ops.py    From trax with Apache License 2.0
def sum(a, axis=None, dtype=None, keepdims=None):  # pylint: disable=redefined-builtin
  return _reduce(tf.reduce_sum, a, axis=axis, dtype=dtype, keepdims=keepdims,
                 tf_bool_fn=tf.reduce_any) 
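The wrapper also takes tf_bool_fn because tf.reduce_sum does not operate on boolean tensors directly; a minimal standalone illustration of the two underlying reductions (not part of the trax module):

bools = tf.constant([True, False, True])
tf.reduce_any(bools)                        # True -- logical reduction
tf.reduce_sum(tf.cast(bools, tf.int32))     # 2    -- NumPy-style count of True values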
Example #19
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_data_fitting(self):
    """Tests MLE estimation for a simple geometric GLM."""
    n, dim = 100, 3
    dtype = tf.float64
    np.random.seed(234095)
    x = np.random.choice([0, 1], size=[dim, n])
    s = 0.01 * np.sum(x, 0)
    p = 1. / (1 + np.exp(-s))
    y = np.random.geometric(p)
    x_data = tf.convert_to_tensor(value=x, dtype=dtype)
    y_data = tf.expand_dims(tf.convert_to_tensor(value=y, dtype=dtype), -1)

    def neg_log_likelihood(state):
      state_ext = tf.expand_dims(state, 0)
      linear_part = tf.matmul(state_ext, x_data)
      linear_part_ex = tf.stack([tf.zeros_like(linear_part), linear_part],
                                axis=0)
      term1 = tf.squeeze(
          tf.matmul(tf.reduce_logsumexp(linear_part_ex, axis=0), y_data), -1)
      term2 = (0.5 * tf.reduce_sum(state_ext * state_ext, axis=-1) -
               tf.reduce_sum(linear_part, axis=-1))
      return tf.squeeze(term1 + term2)

    self._check_algorithm(
        func=neg_log_likelihood,
        start_point=np.ones(shape=[dim]),
        expected_argmin=[-0.020460034354, 0.171708568111, 0.021200423717]) 
Example #20
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_logistic_regression(self):
    dim = 5
    n_objs = 10000
    np.random.seed(1)
    betas = np.random.randn(dim)  # The true beta
    intercept = np.random.randn()  # The true intercept
    features = np.random.randn(n_objs, dim)  # The feature matrix
    probs = 1 / (1 + np.exp(
        -np.matmul(features, np.expand_dims(betas, -1)) - intercept))
    labels = np.random.binomial(1, probs)  # The true labels
    regularization = 0.8
    feat = tf.constant(features, dtype=tf.float64)
    lab = tf.constant(labels, dtype=feat.dtype)

    def f_negative_log_likelihood(params):
      intercept, beta = params[0], params[1:]
      logit = tf.matmul(feat, tf.expand_dims(beta, -1)) + intercept
      log_likelihood = tf.reduce_sum(
          tf.nn.sigmoid_cross_entropy_with_logits(labels=lab, logits=logit))
      l2_penalty = regularization * tf.reduce_sum(beta**2)
      total_loss = log_likelihood + l2_penalty
      return total_loss
    start_point = np.ones(dim + 1)
    argmin = [
        -2.38636155, 1.61778325, -0.60694238, -0.51523609, -1.09832275,
        0.88892742
    ]

    self._check_algorithm(
        func=f_negative_log_likelihood,
        start_point=start_point,
        expected_argmin=argmin,
        gtol=1e-5) 
Example #21
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_paraboloid_4th_order(self):
    self._check_algorithm(
        func=lambda x: tf.reduce_sum(x**4),
        start_point=[1, 2, 3, 4, 5],
        expected_argmin=[0, 0, 0, 0, 0],
        gtol=1e-10) 
Example #22
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def _rosenbrock(x):
  """See https://en.wikipedia.org/wiki/Rosenbrock_function."""
  term1 = 100 * tf.reduce_sum(tf.square(x[1:] - tf.square(x[:-1])))
  term2 = tf.reduce_sum(tf.square(1 - x[:-1]))
  return term1 + term2 
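A quick standalone sanity check of the definition above: both terms vanish at the global minimum x = (1, ..., 1).

x = tf.constant([1.0, 1.0, 1.0, 1.0])
_rosenbrock(x)   # 0.0 -- the global minimum of the Rosenbrock function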
Example #23
Source File: optimizer_test.py    From tf-quant-finance with Apache License 2.0
def test_differential_evolution(self):
    """Use differential evolution algorithm to minimize a quadratic function."""
    minimum = np.array([1.0, 1.0])
    scales = np.array([2.0, 3.0])
    def quadratic(x):
      return tf.reduce_sum(
          scales * tf.math.squared_difference(x, minimum), axis=-1)

    initial_population = tf.random.uniform([40, 2], seed=1243)
    results = self.evaluate(tff_math.optimizer.differential_evolution_minimize(
        quadratic,
        initial_population=initial_population,
        func_tolerance=1e-12,
        seed=2484))
    self.assertTrue(results.converged) 
Example #24
Source File: gradient.py    From tf-quant-finance with Apache License 2.0
def make_val_and_grad_fn(value_fn):
  """Function decorator to compute both function value and gradient.

  For example:

  ```
  @tff.math.make_val_and_grad_fn
  def quadratic(x):
    return tf.reduce_sum(scales * (x - minimum) ** 2, axis=-1)
  ```

  Turns `quadratic` into a function that accepts a point as a `Tensor` as input
  and returns a tuple of two `Tensor`s with the value and the gradient of the
  defined quadratic function evaluated at the input point.

  This is useful for constructing functions to optimize with tff.math.optimizer
  methods.

  Args:
    value_fn: A python function to decorate.

  Returns:
    The decorated function.
  """
  @functools.wraps(value_fn)
  def val_and_grad(x):
    return value_and_gradient(value_fn, x)

  return val_and_grad 
Example #25
Source File: gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_make_val_and_grad_fn(self):
    minimum = np.array([1.0, 1.0])
    scales = np.array([2.0, 3.0])

    @tff.math.make_val_and_grad_fn
    def quadratic(x):
      return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)

    point = tf.constant([2.0, 2.0], dtype=tf.float64)
    val, grad = self.evaluate(quadratic(point))
    self.assertNear(val, 5.0, 1e-5)
    self.assertArrayNear(grad, [4.0, 6.0], 1e-5) 
Example #26
Source File: halton_impl.py    From tf-quant-finance with Apache License 2.0
def _randomize(coeffs, radixes, seed, perms=None):
  """Applies the Owen (2017) randomization to the coefficients."""
  given_dtype = coeffs.dtype
  coeffs = tf.cast(coeffs, dtype=tf.int32)
  num_coeffs = _NUM_COEFFS_BY_DTYPE[given_dtype]
  radixes = tf.reshape(tf.cast(radixes, dtype=tf.int32), shape=[-1])
  if perms is None:
    perms = _get_permutations(num_coeffs, radixes, seed)
    perms = tf.reshape(perms, shape=[-1])
  radix_sum = tf.reduce_sum(input_tensor=radixes)
  radix_offsets = tf.reshape(tf.cumsum(radixes, exclusive=True), shape=[-1, 1])
  offsets = radix_offsets + tf.range(num_coeffs) * radix_sum
  permuted_coeffs = tf.gather(perms, coeffs + offsets)
  return tf.cast(permuted_coeffs, dtype=given_dtype), perms 
Example #27
Source File: losses.py    From ranking with Apache License 2.0
def call(self, y_true, y_pred):
    """See _RankingLoss."""
    losses, weights = self._loss.compute_unreduced_loss(
        labels=y_true, logits=y_pred)
    losses = tf.multiply(losses, weights)
    # [batch_size, list_size, list_size]
    losses.get_shape().assert_has_rank(3)
    # Reduce the loss along the last dim so that weights ([batch_size, 1] or
    # [batch_size, list_size]) can be applied in __call__.
    return tf.reduce_sum(losses, axis=2) 
Example #28
Source File: pixelcnn.py    From alibi-detect with Apache License 2.0
def _init_norm(self):
        """Set the norm of the weight vector."""
        kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.v), axis=self.kernel_norm_axes))
        self.g.assign(kernel_norm) 
Example #29
Source File: resnet50_ctl_tf2.py    From tpu_models with Apache License 2.0
def safe_mean(losses):
  total = tf.reduce_sum(losses)
  num_elements = tf.dtypes.cast(tf.size(losses), dtype=losses.dtype)
  return tf.math.divide_no_nan(total, num_elements) 
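A hypothetical usage showing why tf.math.divide_no_nan is preferred over a plain division when losses may be empty:

losses = tf.constant([1.0, 2.0, 3.0])
safe_mean(losses)             # 2.0
safe_mean(tf.zeros([0]))      # 0.0 -- divide_no_nan returns 0 instead of NaN for an empty batch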
Example #30
Source File: halton_test.py    From tf-quant-finance with Apache License 2.0
def test_partial_sum_func_qmc(self):
    """Tests the QMC evaluation of (x_j + x_{j+1} ...+x_{n})^2.

    A good test of QMC is provided by the function:

      f(x_1, ..., x_n, x_{n+1}, ..., x_{n+m}) = (x_{n+1} + ... + x_{n+m} - m / 2)^2

    with the coordinates taking values in the unit interval. The mean and
    variance of this function (with the uniform distribution over the
    unit-hypercube) are exactly calculable:

      <f> = m / 12, Var(f) = m (5m - 3) / 360

    The purpose of the "shift" (if n > 0) in the coordinate dependence of the
    function is to provide a test for Halton sequences, which exhibit more
    dependence in the higher axes.

    This test confirms that the mean squared error of RQMC estimation falls
    as O(N^(-2+e)) for any e > 0.
    """
    n, m = 5, 5
    dim = n + m
    num_results_lo, num_results_hi = 500, 5000
    replica = 10
    true_mean = m / 12.
    seed_lo = 1925
    seed_hi = 898128

    def func_estimate(x):
      return tf.reduce_mean(
          input_tensor=tf.math.squared_difference(
              tf.reduce_sum(input_tensor=x[:, -m:], axis=-1), m / 2.))

    estimates = []
    for i in range(replica):
      sample_lo, _ = random.halton.sample(
          dim, num_results=num_results_lo, seed=seed_lo + i)
      sample_hi, _ = random.halton.sample(
          dim, num_results=num_results_hi, seed=seed_hi + i)
      f_lo, f_hi = func_estimate(sample_lo), func_estimate(sample_hi)
      estimates.append((self.evaluate(f_lo), self.evaluate(f_hi)))
    var_lo, var_hi = np.mean((np.array(estimates) - true_mean)**2, axis=0)

    # Expect that the variance scales as N^(-2), so var_hi / var_lo ~ k / 10^2
    # with k a fudge factor accounting for the residual N dependence
    # of the QMC error and the sampling error.
    log_rel_err = np.log(100 * var_hi / var_lo)
    self.assertAllClose(log_rel_err, 0., atol=1.2)