Python tensorflow.python.ops.random_ops.random_normal() Examples

The following are 30 code examples of tensorflow.python.ops.random_ops.random_normal(), drawn from open-source projects. The attribution line above each example names the original project and source file. You may also want to check out all available functions/classes of the module tensorflow.python.ops.random_ops, or try the search function.
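Before the project-sourced examples, here is a minimal standalone sketch of the op's signature (shape, mean, stddev, dtype, seed). The shapes and seed are illustrative only, and the sketch assumes a TensorFlow 1.x graph-mode environment, which is what all of the examples below were written for:

import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import random_ops

# Draw a [2, 3] tensor of N(0, 1) samples; the graph-level seed makes the
# draw reproducible across runs of the same graph.
samples = random_ops.random_normal(
    [2, 3], mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=42)

with tf.Session() as sess:
  print(sess.run(samples))  # a 2x3 array of float32 values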
Example #1
Source File: student_t.py    From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
    # The sampling method comes from the fact that if:
    #   X ~ Normal(0, 1)
    #   Z ~ Chi2(df)
    #   Y = X / sqrt(Z / df)
    # then:
    #   Y ~ StudentT(df).
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
    df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
    gamma_sample = random_ops.random_gamma(
        [n],
        0.5 * df,
        beta=0.5,
        dtype=self.dtype,
        seed=distribution_util.gen_new_seed(seed, salt="student_t"))
    samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
    return samples * self.scale + self.loc  # Abs(scale) not wanted. 
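The random_gamma call above works because Chi2(df) is exactly Gamma(shape=df/2, rate=1/2), so drawing gamma samples with alpha=0.5*df and beta=0.5 produces the Z in the comment. A quick moment check with NumPy (a hedged illustration; none of these names come from the source):

import numpy as np

df = 5.0
rng = np.random.default_rng(0)
z_chi2 = rng.chisquare(df, size=100000)
# NumPy parameterizes gamma by shape and *scale*, where scale = 1/rate = 2.
z_gamma = rng.gamma(shape=df / 2.0, scale=2.0, size=100000)

print(z_chi2.mean(), z_gamma.mean())  # both close to df = 5
print(z_chi2.var(), z_gamma.var())    # both close to 2*df = 10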
Example #2
Source File: backend.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
  """Returns a tensor with normal distribution of values.

  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      mean: A float, mean of the normal distribution to draw samples.
      stddev: A float, standard deviation of the normal distribution
          to draw samples.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    seed = np.random.randint(10e6)
  return random_ops.random_normal(
      shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed) 
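A hypothetical call to this wrapper (the shape and values here are illustrative, not from the source; with dtype=None the function falls back to the Keras floatx() default, typically 'float32'):

noise = random_normal((64, 128), mean=0.0, stddev=0.05, seed=1234)
# noise is a [64, 128] tensor of draws from N(0.0, 0.05**2); the explicit
# seed makes the graph-level draw reproducible.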
Example #3
Source File: rnn_cell_test.py    From nlstm with MIT License
def _check_non_tuple_cell(self, *args, **kwargs):
    batch_size = 2
    num_units = 3
    depth = 2
    g = ops.Graph()
    with self.test_session(graph=g) as sess:
      with g.as_default():
        cell = contrib_rnn_cell.NLSTMCell(num_units, depth,
                                          *args, **kwargs)
        init_state = cell.zero_state(batch_size, dtype=dtypes.float32)
        output, new_state = cell(
            inputs=random_ops.random_normal([batch_size, 5]),
            state=init_state)
        variables.global_variables_initializer().run()
        vals = sess.run([output, new_state])
    self.assertAllEqual(vals[0], vals[1][:, :3])
    self.assertAllEqual(vals[0].shape, [2, 3])
    self.assertAllEqual(vals[1].shape, [2, 9])
    self.assertEqual(cell.state_size, num_units * (depth + 1))
    self.assertEqual(cell.depth, depth)
    self.assertEqual(cell.output_size, num_units) 
Example #4
Source File: rnn_cell_test.py    From nlstm with MIT License
def _check_tuple_cell(self, *args, **kwargs):
    batch_size = 2
    num_units = 3
    depth = 4
    g = ops.Graph()
    with self.test_session(graph=g) as sess:
      with g.as_default():
        cell = contrib_rnn_cell.NLSTMCell(num_units, depth, *args, **kwargs)
        init_state = cell.zero_state(batch_size, dtype=dtypes.float32)
        output, new_state = cell(
            inputs=random_ops.random_normal([batch_size, 5]),
            state=init_state)
        variables.global_variables_initializer().run()
        vals = sess.run([output, new_state])
    self.assertAllEqual(vals[0], vals[1][0])
    self.assertAllEqual(vals[0].shape, [2, 3])
    for val in vals[1]:
      self.assertAllEqual(val.shape, [2, 3])
    self.assertEqual(len(vals[1]), 5)
    self.assertAllEqual(cell.state_size, [num_units] * (depth + 1))
    self.assertEqual(cell.depth, depth)
    self.assertEqual(cell.output_size, num_units) 
Example #5
Source File: student_t.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _sample_n(self, n, seed=None):
    # The sampling method comes from the fact that if:
    #   X ~ Normal(0, 1)
    #   Z ~ Chi2(df)
    #   Y = X / sqrt(Z / df)
    # then:
    #   Y ~ StudentT(df).
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
    df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
    gamma_sample = random_ops.random_gamma(
        [n],
        0.5 * df,
        beta=0.5,
        dtype=self.dtype,
        seed=distribution_util.gen_new_seed(seed, salt="student_t"))
    samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
    return samples * self.scale + self.loc  # Abs(scale) not wanted. 
Example #6
Source File: linear_operator_test_util.py    From auto-alt-text-lambda-api with MIT License
def _make_x(self, operator, adjoint):
    # Return the number of systems for the argument 'x' for .apply(x)
    r = self._get_num_systems(operator)
    # If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of
    # shape [B1,...,Bb, N, R], R = 1 or 2.
    if operator.shape.is_fully_defined():
      batch_shape = operator.batch_shape.as_list()
      if adjoint:
        n = operator.range_dimension.value
      else:
        n = operator.domain_dimension.value
      x_shape = batch_shape + [n, r]
    else:
      batch_shape = operator.batch_shape_dynamic()
      if adjoint:
        n = operator.range_dimension_dynamic()
      else:
        n = operator.domain_dimension_dynamic()
      x_shape = array_ops.concat((batch_shape, [n, r]), 0)

    return random_normal(x_shape, dtype=operator.dtype) 
Example #7
Source File: linear_operator_test_util.py    From auto-alt-text-lambda-api with MIT License
def _make_x(self, operator, adjoint):
    # Value of adjoint makes no difference because the operator is square.
    # Return the number of systems to solve, R, equal to 1 or 2.
    r = self._get_num_systems(operator)
    # If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of
    # shape [B1,...,Bb, N, R], R = 1 or 2.
    if operator.shape.is_fully_defined():
      batch_shape = operator.batch_shape.as_list()
      n = operator.domain_dimension.value
      x_shape = batch_shape + [n, r]
    else:
      batch_shape = operator.batch_shape_dynamic()
      n = operator.domain_dimension_dynamic()
      x_shape = array_ops.concat((batch_shape, [n, r]), 0)

    return random_normal(x_shape, dtype=operator.dtype) 
Example #8
Source File: student_t.py    From auto-alt-text-lambda-api with MIT License
def _sample_n(self, n, seed=None):
    # The sampling method comes from the fact that if:
    #   X ~ Normal(0, 1)
    #   Z ~ Chi2(df)
    #   Y = X / sqrt(Z / df)
    # then:
    #   Y ~ StudentT(df).
    shape = array_ops.concat([[n], self.batch_shape()], 0)
    normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
    df = self.df * array_ops.ones(self.batch_shape(), dtype=self.dtype)
    gamma_sample = random_ops.random_gamma(
        [n],
        0.5 * df,
        beta=0.5,
        dtype=self.dtype,
        seed=distribution_util.gen_new_seed(seed, salt="student_t"))
    samples = normal_sample / math_ops.sqrt(gamma_sample / df)
    return samples * self.sigma + self.mu  # Abs(sigma) not wanted. 
Example #9
Source File: init_ops.py    From deep_image_model with Apache License 2.0
def random_normal_initializer(mean=0.0, stddev=1.0, seed=None,
                              dtype=dtypes.float32):
  """Returns an initializer that generates tensors with a normal distribution.

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values
      to generate.
    stddev: a python scalar or a scalar tensor. Standard deviation of the
      random values to generate.
    seed: A Python integer. Used to create random seeds. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.
    dtype: The data type. Only floating point types are supported.

  Returns:
    An initializer that generates tensors with a normal distribution.

  Raises:
    ValueError: if `dtype` is not a floating point type.
  """
  def _initializer(shape, dtype=_assert_float_dtype(dtype),
                   partition_info=None):
    return random_ops.random_normal(shape, mean, stddev, dtype, seed=seed)
  return _initializer 
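A sketch of how such an initializer is typically consumed in TF 1.x variable creation (the variable name and shape are illustrative, not from the source):

import tensorflow as tf

init = random_normal_initializer(mean=0.0, stddev=0.02, seed=7)
# Each call to the returned _initializer draws a [784, 256] normal tensor.
w = tf.get_variable("w", shape=[784, 256], initializer=init)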
Example #10
Source File: mvn.py    From deep_image_model with Apache License 2.0
def _sample_n(self, n, seed=None):
    # Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
    shape = array_ops.concat(0, [self._cov.vector_shape(), [n]])
    white_samples = random_ops.random_normal(shape=shape,
                                             mean=0.,
                                             stddev=1.,
                                             dtype=self.dtype,
                                             seed=seed)

    correlated_samples = self._cov.sqrt_matmul(white_samples)

    # Move the last dimension to the front
    perm = array_ops.concat(0, (
        array_ops.pack([array_ops.rank(correlated_samples) - 1]),
        math_ops.range(0, array_ops.rank(correlated_samples) - 1)))

    # TODO(ebrevdo): Once we get a proper tensor contraction op,
    # perform the inner product using that instead of batch_matmul
    # and this slow transpose can go away!
    correlated_samples = array_ops.transpose(correlated_samples, perm)
    samples = correlated_samples + self.mu
    return samples 
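The pattern above — multiply white N(0, I) noise by a matrix square root of the covariance, then add the mean — is the standard way to draw correlated Gaussians. A NumPy sketch of the same trick (illustrative names and values, not from the source):

import numpy as np

rng = np.random.default_rng(0)
mu = np.array([1.0, -1.0])
sigma = np.array([[2.0, 0.6], [0.6, 1.0]])
L = np.linalg.cholesky(sigma)           # sigma = L @ L.T
z = rng.standard_normal((100000, 2))    # white samples
x = z @ L.T + mu                        # correlated samples
print(np.cov(x, rowvar=False))          # close to sigma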
Example #11
Source File: student_t.py    From keras-lambda with MIT License
def _sample_n(self, n, seed=None):
    # The sampling method comes from the fact that if:
    #   X ~ Normal(0, 1)
    #   Z ~ Chi2(df)
    #   Y = X / sqrt(Z / df)
    # then:
    #   Y ~ StudentT(df).
    shape = array_ops.concat([[n], self.batch_shape()], 0)
    normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
    df = self.df * array_ops.ones(self.batch_shape(), dtype=self.dtype)
    gamma_sample = random_ops.random_gamma(
        [n],
        0.5 * df,
        beta=0.5,
        dtype=self.dtype,
        seed=distribution_util.gen_new_seed(seed, salt="student_t"))
    samples = normal_sample / math_ops.sqrt(gamma_sample / df)
    return samples * self.sigma + self.mu  # Abs(sigma) not wanted. 
Example #12
Source File: linear_operator_test_util.py    From lambda-packs with MIT License
def _make_x(self, operator, adjoint):
    # Return the number of systems for the argument 'x' for .matmul(x)
    r = self._get_num_systems(operator)
    # If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of
    # shape [B1,...,Bb, N, R], R = 1 or 2.
    if operator.shape.is_fully_defined():
      batch_shape = operator.batch_shape.as_list()
      if adjoint:
        n = operator.range_dimension.value
      else:
        n = operator.domain_dimension.value
      x_shape = batch_shape + [n, r]
    else:
      batch_shape = operator.batch_shape_tensor()
      if adjoint:
        n = operator.range_dimension_tensor()
      else:
        n = operator.domain_dimension_tensor()
      x_shape = array_ops.concat((batch_shape, [n, r]), 0)

    return random_normal(x_shape, dtype=operator.dtype) 
Example #13
Source File: linear_operator_test_util.py    From lambda-packs with MIT License
def _make_x(self, operator, adjoint):
    # Value of adjoint makes no difference because the operator is square.
    # Return the number of systems to solve, R, equal to 1 or 2.
    r = self._get_num_systems(operator)
    # If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of
    # shape [B1,...,Bb, N, R], R = 1 or 2.
    if operator.shape.is_fully_defined():
      batch_shape = operator.batch_shape.as_list()
      n = operator.domain_dimension.value
      x_shape = batch_shape + [n, r]
    else:
      batch_shape = operator.batch_shape_tensor()
      n = operator.domain_dimension_tensor()
      x_shape = array_ops.concat((batch_shape, [n, r]), 0)

    return random_normal(x_shape, dtype=operator.dtype) 
Example #14
Source File: linear_operator_test_util.py    From keras-lambda with MIT License
def _make_x(self, operator, adjoint):
    # Value of adjoint makes no difference because the operator is square.
    # Return the number of systems to solve, R, equal to 1 or 2.
    r = self._get_num_systems(operator)
    # If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of
    # shape [B1,...,Bb, N, R], R = 1 or 2.
    if operator.shape.is_fully_defined():
      batch_shape = operator.batch_shape.as_list()
      n = operator.domain_dimension.value
      x_shape = batch_shape + [n, r]
    else:
      batch_shape = operator.batch_shape_dynamic()
      n = operator.domain_dimension_dynamic()
      x_shape = array_ops.concat((batch_shape, [n, r]), 0)

    return random_normal(x_shape, dtype=operator.dtype) 
Example #15
Source File: linear_operator_test_util.py    From keras-lambda with MIT License
def _make_x(self, operator, adjoint):
    # Return the number of systems for the argument 'x' for .apply(x)
    r = self._get_num_systems(operator)
    # If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of
    # shape [B1,...,Bb, N, R], R = 1 or 2.
    if operator.shape.is_fully_defined():
      batch_shape = operator.batch_shape.as_list()
      if adjoint:
        n = operator.range_dimension.value
      else:
        n = operator.domain_dimension.value
      x_shape = batch_shape + [n, r]
    else:
      batch_shape = operator.batch_shape_dynamic()
      if adjoint:
        n = operator.range_dimension_dynamic()
      else:
        n = operator.domain_dimension_dynamic()
      x_shape = array_ops.concat((batch_shape, [n, r]), 0)

    return random_normal(x_shape, dtype=operator.dtype) 
Example #16
Source File: init_ops.py    From keras-lambda with MIT License
def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    return random_ops.random_normal(shape, self.mean, self.stddev,
                                    dtype, seed=self.seed) 
Example #17
Source File: attention_wrapper.py    From CommonSenseMultiHopQA with MIT License
def _monotonic_probability_fn(score, previous_alignments, sigmoid_noise, mode,
                              seed=None):
  """Attention probability function for monotonic attention.

  Takes in unnormalized attention scores, adds pre-sigmoid noise to encourage
  the model to make discrete attention decisions, passes them through a sigmoid
  to obtain "choosing" probabilities, and then calls monotonic_attention to
  obtain the attention distribution.  For more information, see

  Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
  "Online and Linear-Time Attention by Enforcing Monotonic Alignments."
  ICML 2017.  https://arxiv.org/abs/1704.00784

  Args:
    score: Unnormalized attention scores, shape `[batch_size, alignments_size]`
    previous_alignments: Previous attention distribution, shape
      `[batch_size, alignments_size]`
    sigmoid_noise: Standard deviation of pre-sigmoid noise.  Setting this larger
      than 0 will encourage the model to produce large attention scores,
      effectively making the choosing probabilities discrete and the resulting
      attention distribution one-hot.  It should be set to 0 at test-time, and
      when hard attention is not desired.
    mode: How to compute the attention distribution.  Must be one of
      'recursive', 'parallel', or 'hard'.  See the docstring for
      `tf.contrib.seq2seq.monotonic_attention` for more information.
    seed: (optional) Random seed for pre-sigmoid noise.

  Returns:
    A `[batch_size, alignments_size]`-shape tensor corresponding to the
    resulting attention distribution.
  """
  # Optionally add pre-sigmoid noise to the scores
  if sigmoid_noise > 0:
    noise = random_ops.random_normal(array_ops.shape(score), dtype=score.dtype,
                                     seed=seed)
    score += sigmoid_noise*noise
  # Compute "choosing" probabilities from the attention scores
  p_choose_i = math_ops.sigmoid(score)
  # Convert from choosing probabilities to attention distribution
  return monotonic_attention(p_choose_i, previous_alignments, mode) 
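Why the noise pushes the attention toward hard decisions: large pre-sigmoid magnitudes saturate the sigmoid, so p_choose_i lands near 0 or 1. A NumPy illustration (a stand-in for the TF ops above; the scores and noise scale are arbitrary):

import numpy as np

sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
score = np.array([0.3, -0.2, 0.1])
rng = np.random.default_rng(0)

print(sigmoid(score))                             # soft values near 0.5
print(sigmoid(score + 4.0 * rng.normal(size=3)))  # mostly saturated near 0 or 1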
Example #18
Source File: prefetch_queue_test.py    From keras-lambda with MIT License
def testMultipleDequeue(self):
    with self.test_session() as sess:
      batch_size = 10
      image_size = 32
      num_batches = 4

      zero64 = constant_op.constant(0, dtype=dtypes.int64)

      examples = variables.Variable(zero64)
      counter = examples.count_up_to(num_batches * batch_size)
      image = random_ops.random_normal(
          [image_size, image_size, 3], dtype=dtypes.float32, name='images')
      label = random_ops.random_uniform(
          [1], 0, 10, dtype=dtypes.int32, name='labels')

      batches = input_lib.batch(
          [counter, image, label], batch_size=batch_size, num_threads=4)

      batcher = prefetch_queue.prefetch_queue(batches)
      batches_list = [batcher.dequeue() for _ in range(2)]

      variables.global_variables_initializer().run()
      threads = queue_runner_impl.start_queue_runners()

      value_counter = []
      for _ in range(int(num_batches / 2)):
        for batches in batches_list:
          results = sess.run(batches)
          value_counter.append(results[0])
          self.assertEqual(results[1].shape,
                           (batch_size, image_size, image_size, 3))
          self.assertEqual(results[2].shape, (batch_size, 1))

      self.assertAllEqual(
          np.sort(np.concatenate(value_counter)),
          np.arange(0, num_batches * batch_size))
      # Reached the limit.
      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run(batches)
      for thread in threads:
        thread.join() 
Example #19
Source File: factorization_ops.py    From keras-lambda with MIT License
def _create_factors(cls, rows, cols, num_shards, init, name):
    """Helper function to create row and column factors."""
    if callable(init):
      init = init()
    if isinstance(init, list):
      assert len(init) == num_shards
    elif isinstance(init, str) and init == "random":
      pass
    elif num_shards == 1:
      init = [init]
    sharded_matrix = []
    sizes = cls._shard_sizes(rows, num_shards)
    assert len(sizes) == num_shards

    def make_initializer(i, size):

      def initializer():
        if init == "random":
          return random_ops.random_normal([size, cols])
        else:
          return init[i]

      return initializer

    for i, size in enumerate(sizes):
      var_name = "%s_shard_%d" % (name, i)
      var_init = make_initializer(i, size)
      sharded_matrix.append(
          variables.Variable(
              var_init, dtype=dtypes.float32, name=var_name))

    return sharded_matrix 
Example #20
Source File: normal.py    From keras-lambda with MIT License
def _sample_n(self, n, seed=None):
    shape = array_ops.concat(([n], array_ops.shape(self.mean())), 0)
    sampled = random_ops.random_normal(
        shape=shape, mean=0, stddev=1, dtype=self.mu.dtype, seed=seed)
    return sampled * self.sigma + self.mu 
Example #21
Source File: linear_operator_test_util.py    From keras-lambda with MIT License
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None):
  """Tensor with (possibly complex) Gaussian entries.

  Samples are distributed like

  ```
  N(mean, stddev^2), if dtype is real,
  X + iY,  where X, Y ~ N(mean, stddev^2) if dtype is complex.
  ```

  Args:
    shape:  `TensorShape` or Python list.  Shape of the returned tensor.
    mean:  `Tensor` giving mean of normal to sample from.
    stddev:  `Tensor` giving stdev of normal to sample from.
    dtype:  `TensorFlow` `dtype` or numpy dtype
    seed:  Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)

  with ops.name_scope("random_normal"):
    samples = random_ops.random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        seed += 1234
      more_samples = random_ops.random_normal(
          shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
      samples = math_ops.complex(samples, more_samples)
    return samples 
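A hedged usage sketch of this helper for a complex dtype (the shape and seed are illustrative):

x = random_normal([2, 2], dtype=dtypes.complex64, seed=0)
# x has dtype complex64; per the body above, its real part is drawn with
# seed=0 and its imaginary part with seed=0 + 1234, each from N(0, 1).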
Example #22
Source File: prefetch_queue_test.py    From keras-lambda with MIT License
def testMultiThread(self):
    with self.test_session() as sess:
      batch_size = 10
      image_size = 32
      num_batches = 5

      zero64 = constant_op.constant(0, dtype=dtypes.int64)

      examples = variables.Variable(zero64)
      counter = examples.count_up_to(num_batches * batch_size)
      image = random_ops.random_normal(
          [image_size, image_size, 3], dtype=dtypes.float32, name='images')
      label = random_ops.random_uniform(
          [1], 0, 10, dtype=dtypes.int32, name='labels')

      batches = input_lib.batch(
          [counter, image, label], batch_size=batch_size, num_threads=4)

      batches = prefetch_queue.prefetch_queue(batches).dequeue()

      variables.global_variables_initializer().run()
      threads = queue_runner_impl.start_queue_runners()

      value_counter = []
      for _ in range(num_batches):
        results = sess.run(batches)
        value_counter.append(results[0])
        self.assertEqual(results[1].shape,
                         (batch_size, image_size, image_size, 3))
        self.assertEqual(results[2].shape, (batch_size, 1))

      self.assertAllEqual(
          np.sort(np.concatenate(value_counter)),
          np.arange(0, num_batches * batch_size))
      # Reached the limit.
      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run(batches)
      for thread in threads:
        thread.join() 
Example #23
Source File: attention_wrapper.py    From QGforQA with MIT License
def _monotonic_probability_fn(score, previous_alignments, sigmoid_noise, mode,
                              seed=None):
  """Attention probability function for monotonic attention.

  Takes in unnormalized attention scores, adds pre-sigmoid noise to encourage
  the model to make discrete attention decisions, passes them through a sigmoid
  to obtain "choosing" probabilities, and then calls monotonic_attention to
  obtain the attention distribution.  For more information, see

  Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
  "Online and Linear-Time Attention by Enforcing Monotonic Alignments."
  ICML 2017.  https://arxiv.org/abs/1704.00784

  Args:
    score: Unnormalized attention scores, shape `[batch_size, alignments_size]`
    previous_alignments: Previous attention distribution, shape
      `[batch_size, alignments_size]`
    sigmoid_noise: Standard deviation of pre-sigmoid noise.  Setting this larger
      than 0 will encourage the model to produce large attention scores,
      effectively making the choosing probabilities discrete and the resulting
      attention distribution one-hot.  It should be set to 0 at test-time, and
      when hard attention is not desired.
    mode: How to compute the attention distribution.  Must be one of
      'recursive', 'parallel', or 'hard'.  See the docstring for
      `tf.contrib.seq2seq.monotonic_attention` for more information.
    seed: (optional) Random seed for pre-sigmoid noise.

  Returns:
    A `[batch_size, alignments_size]`-shape tensor corresponding to the
    resulting attention distribution.
  """
  # Optionally add pre-sigmoid noise to the scores
  if sigmoid_noise > 0:
    noise = random_ops.random_normal(array_ops.shape(score), dtype=score.dtype,
                                     seed=seed)
    score += sigmoid_noise*noise
  # Compute "choosing" probabilities from the attention scores
  p_choose_i = math_ops.sigmoid(score)
  # Convert from choosing probabilities to attention distribution
  return monotonic_attention(p_choose_i, previous_alignments, mode) 
Example #24
Source File: normal.py    From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    sampled = random_ops.random_normal(
        shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed)
    return sampled * self.scale + self.loc 
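This is the location-scale reparameterization: a N(loc, scale**2) sample is just loc + scale * N(0, 1). A NumPy check with illustrative values:

import numpy as np

rng = np.random.default_rng(0)
loc, scale = 2.0, 0.5
x = loc + scale * rng.standard_normal(100000)
print(x.mean(), x.std())  # close to 2.0 and 0.5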
Example #25
Source File: sparse_optimizers.py    From rigl with Apache License 2.0
def get_grow_tensor(self, weights, method):
    """Different ways to initialize new connections.

    Args:
      weights: tf.Tensor or Variable.
      method: str, available options: 'zeros', 'random_normal', 'random_uniform'
        and 'initial_value'

    Returns:
      tf.Tensor same shape and type as weights.

    Raises:
      ValueError, when the method is not valid.
    """
    if not isinstance(method, six.string_types):
      raise ValueError('Grow-Init: %s is not a string' % method)

    if method == 'zeros':
      grow_tensor = array_ops.zeros_like(weights, dtype=weights.dtype)
    elif method.startswith('initial_dist'):
      original_shape = weights.initial_value.shape
      divisor = extract_number(method)
      grow_tensor = array_ops.reshape(
          random_ops.random_shuffle(array_ops.reshape(
              weights.initial_value, [-1])),
          original_shape) / divisor
    elif method.startswith('random_normal'):
      stddev = math_ops.reduce_std(weights)
      divisor = extract_number(method)
      grow_tensor = self._random_normal(
          weights.shape, stddev=stddev, dtype=weights.dtype,
          seed=hash(weights.name + 'grow_init_n')) / divisor
    elif method.startswith('random_uniform'):
      mean = math_ops.reduce_mean(math_ops.abs(weights))
      divisor = extract_number(method)
      grow_tensor = self._random_uniform(
          weights.shape, minval=-mean, maxval=mean, dtype=weights.dtype,
          seed=hash(weights.name + 'grow_init_u')) / divisor
    else:
      raise ValueError('Grow-Init: %s is not a valid option.' % method)
    return grow_tensor 
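The method string apparently also encodes a divisor, parsed out by extract_number (a helper defined elsewhere in the file and not shown here). A standalone NumPy sketch of just the 'random_normal' branch, with the divisor passed explicitly since the parsing format is not shown:

import numpy as np

def grow_like_random_normal(weights, divisor=1.0, seed=0):
  # New connections are drawn with the empirical stddev of the existing
  # weights, then scaled down by the divisor.
  rng = np.random.default_rng(seed)
  return rng.normal(0.0, weights.std(), size=weights.shape) / divisor

w = np.array([[0.5, -0.3], [0.1, 0.7]], dtype=np.float32)
print(grow_like_random_normal(w, divisor=2.0))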
Example #26
Source File: tadam.py    From am3 with Apache License 2.0
def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype

        if shape:
            n = float(shape[-1])
        else:
            n = 1.0
        for dim in shape[:-2]:
            n *= float(dim)

        self.stddev = np.sqrt(self.factor * 2.0 / n)
        return random_ops.random_normal(shape, self.mean, self.stddev,
                                        dtype, seed=self.seed) 
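The stddev here follows a variance-scaling rule, stddev = sqrt(factor * 2 / n), where n is the product of the last dimension and every dimension except the last two. A worked check for an illustrative 3x3 conv kernel shape (Examples #27, #28, and #30 below use the same computation):

import numpy as np

shape, factor = [3, 3, 64, 128], 1.0
n = float(shape[-1])       # 128
for dim in shape[:-2]:     # the two 3s
  n *= float(dim)          # n = 1152
print(np.sqrt(factor * 2.0 / n))  # about 0.0417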
Example #27
Source File: AM3_protonet++.py    From am3 with Apache License 2.0
def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype

        if shape:
            n = float(shape[-1])
        else:
            n = 1.0
        for dim in shape[:-2]:
            n *= float(dim)

        self.stddev = np.sqrt(self.factor * 2.0 / n)
        return random_ops.random_normal(shape, self.mean, self.stddev,
                                        dtype, seed=self.seed) 
Example #28
Source File: protonet++.py    From am3 with Apache License 2.0
def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype

        if shape:
            n = float(shape[-1])
        else:
            n = 1.0
        for dim in shape[:-2]:
            n *= float(dim)

        self.stddev = np.sqrt(self.factor * 2.0 / n)
        return random_ops.random_normal(shape, self.mean, self.stddev,
                                        dtype, seed=self.seed) 
Example #29
Source File: sparse_optimizers.py    From rigl with Apache License 2.0
def _random_normal(self, *args, **kwargs):
    if self._use_stateless:
      c_seed = self._stateless_seed_offset + kwargs['seed']
      kwargs['seed'] = math_ops.cast(
          array_ops.stack([c_seed, self._global_step]), dtypes.int32)
      return stateless_random_ops.stateless_random_normal(*args, **kwargs)
    else:
      return random_ops.random_normal(*args, **kwargs) 
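A minimal sketch of the stateless path above (assumes a TF 1.x-era build where tensorflow.python.ops.stateless_random_ops is available, as the source itself does; the seed values are illustrative). The [2]-shaped int32 seed is what makes the draw a pure function of its inputs, so combining a fixed offset with the global step gives a different but reproducible draw per step:

import tensorflow as tf
from tensorflow.python.ops import stateless_random_ops

seed = tf.constant([1234, 0], dtype=tf.int32)  # [offset seed, global step]
x = stateless_random_ops.stateless_random_normal([2, 2], seed=seed)
# Re-evaluating x, even in a new session, yields identical values for the
# same seed pair, unlike the stateful random_ops.random_normal.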
Example #30
Source File: AM3_TADAM.py    From am3 with Apache License 2.0
def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype

        if shape:
            n = float(shape[-1])
        else:
            n = 1.0
        for dim in shape[:-2]:
            n *= float(dim)

        self.stddev = np.sqrt(self.factor * 2.0 / n)
        return random_ops.random_normal(shape, self.mean, self.stddev,
                                        dtype, seed=self.seed)