Python tensorflow.real() Examples

The following are 30 code examples of tensorflow.real(). They are drawn from open-source projects; the source file and project are noted above each example, so you can see each snippet in its original context. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
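Before the project examples, a minimal sketch of the op itself: tf.real() extracts the real part of a complex tensor (for real-valued inputs it is the identity), and tf.imag() is its companion.

import tensorflow as tf

z = tf.complex([1.0, 2.0], [3.0, -4.0])  # complex64 tensor
x = tf.real(z)                           # [1.0, 2.0], dtype float32
y = tf.imag(z)                           # [3.0, -4.0]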
Example #1
Source File: reversible_layers.py    From BERT with Apache License 2.0
def __init__(self, layer, mask, temperature, **kwargs):
    """Constructs flow.

    Args:
      layer: Two-headed masked network taking the inputs and returning a
        real-valued Tensor of shape `[..., length, 2*vocab_size]`.
        Alternatively, `layer` may return a Tensor of shape
        `[..., length, vocab_size]` to be used as the location transform; the
        scale transform will be hard-coded to 1.
      mask: binary Tensor of shape `[length]` forming the bipartite assignment.
      temperature: Positive value determining bias of gradient estimator.
      **kwargs: kwargs of parent class.
    """
    super(DiscreteBipartiteFlow, self).__init__(**kwargs)
    self.layer = layer
    self.mask = mask
    self.temperature = temperature 
Example #2
Source File: reversible_layers.py    From BERT with Apache License 2.0
def one_hot_add(inputs, shift):
  """Performs (inputs + shift) % vocab_size in the one-hot space.

  Args:
    inputs: Tensor of shape `[..., vocab_size]`. Typically a soft/hard one-hot
      Tensor.
    shift: Tensor of shape `[..., vocab_size]`. Typically a soft/hard one-hot
      Tensor specifying how much to shift the corresponding one-hot vector in
      inputs. Soft values perform a "weighted shift": for example,
      shift=[0.2, 0.3, 0.5] performs a linear combination of 0.2 * shifting by
      zero; 0.3 * shifting by one; and 0.5 * shifting by two.

  Returns:
    Tensor of same shape and dtype as inputs.
  """
  # Compute circular 1-D convolution with shift as the kernel.
  inputs = tf.cast(inputs, tf.complex64)
  shift = tf.cast(shift, tf.complex64)
  return tf.real(tf.signal.ifft(tf.signal.fft(inputs) * tf.signal.fft(shift))) 
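For intuition, a small usage sketch (values illustrative): on hard one-hot vectors the circular convolution reduces to index addition modulo vocab_size.

inputs = tf.one_hot(2, depth=5)  # encodes the value 2
shift = tf.one_hot(4, depth=5)   # shift by 4
result = one_hot_add(inputs, shift)
# result is approximately one-hot at index (2 + 4) % 5 == 1, up to FFT round-off.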
Example #3
Source File: audio.py    From avsr-tf1 with GNU General Public License v3.0
def compute_log_mel_spectrograms(stfts, hparams):
    # power_spectrograms = tf.real(stfts * tf.conj(stfts))
    magnitude_spectrograms = tf.abs(stfts)

    num_spectrogram_bins = magnitude_spectrograms.shape[-1].value

    linear_to_mel_weight_matrix = signal.linear_to_mel_weight_matrix(
        hparams.num_mel_bins, num_spectrogram_bins, hparams.sample_rate, hparams.mel_lower_edge_hz,
        hparams.mel_upper_edge_hz)

    mel_spectrograms = tf.tensordot(
        magnitude_spectrograms, linear_to_mel_weight_matrix, 1)

    # Note: Shape inference for `tf.tensordot` does not currently handle this case.
    mel_spectrograms.set_shape(magnitude_spectrograms.shape[:-1].concatenate(
        linear_to_mel_weight_matrix.shape[-1:]))

    log_offset = 1e-6
    log_mel_spectrograms = tf.log(mel_spectrograms + log_offset)

    return log_mel_spectrograms 
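A hedged usage sketch: the STFT parameters are illustrative, `signal` is the same tf.contrib.signal module the source file uses above, and hparams is assumed to carry the fields the function reads (num_mel_bins, sample_rate, mel_lower_edge_hz, mel_upper_edge_hz).

waveform = tf.random_normal([1, 16000])  # one second at 16 kHz, stand-in input
stfts = signal.stft(waveform, frame_length=400, frame_step=160, fft_length=512)
log_mel = compute_log_mel_spectrograms(stfts, hparams)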
Example #4
Source File: scattering.py    From DeepLearningImplementations with MIT License
def compute_fft(x, direction="C2C", inverse=False):

    if direction == 'C2R':
        inverse = True

    x_shape = x.get_shape().as_list()
    h, w = x_shape[-2], x_shape[-3]

    x_complex = tf.complex(x[..., 0], x[..., 1])

    if direction == 'C2R':
        out = tf.real(tf.ifft2d(x_complex)) * h * w
        return out

    else:
        if inverse:
            out = stack_real_imag(tf.ifft2d(x_complex)) * h * w
        else:
            out = stack_real_imag(tf.fft2d(x_complex))
        return out 
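Usage sketch (shapes illustrative; compute_fft relies on stack_real_imag, shown in Example #21). Note that the inverse multiplies by h * w, which undoes the 1/(h*w) normalization of tf.ifft2d, so the forward/inverse pair is unnormalized.

x = tf.random_normal([1, 32, 32, 2])  # real and imaginary parts stacked last
X = compute_fft(x, direction='C2C')   # forward 2-D FFT, still [..., 2]
x_scaled = compute_fft(X, direction='C2C', inverse=True)
# x_scaled equals x * (32 * 32); divide by h * w to recover x.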
Example #5
Source File: losses.py    From QMLT with Apache License 2.0
def trace_distance(rho, sigma):
    r""" Trace distance :math:`\frac{1}{2}\tr \{ \sqrt{ (\rho - \sigma})^2  \}` between quantum states :math:`\rho` and :math:`\sigma`.

    The inputs and outputs are tensors of dtype float, and all computations support automatic differentiation.

    Args:
        rho (tf.Tensor): 2-dimensional Hermitian matrix representing state :math:`\rho`.
        sigma (tf.Tensor): 2-dimensional Hermitian matrix of the same dimensions and dtype as rho,
            representing state :math:`\sigma`.

    Returns:
        tf.Tensor: Returns the scalar trace distance.
    """

    if rho.shape != sigma.shape:
        raise ValueError("Cannot compute the trace distance if inputs have"
                         " different shapes {} and {}".format(rho.shape, sigma.shape))

    diff = rho - sigma
    eig = tf.self_adjoint_eigvals(diff)
    abs_eig = tf.abs(eig)
    return 0.5*tf.real(tf.reduce_sum(abs_eig)) 
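A quick usage check (values illustrative): for orthogonal pure states the trace distance is maximal.

rho = tf.constant([[1., 0.], [0., 0.]])    # |0><0|
sigma = tf.constant([[0., 0.], [0., 1.]])  # |1><1|
d = trace_distance(rho, sigma)
# rho - sigma has eigenvalues +1 and -1, so d evaluates to 1.0.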
Example #6
Source File: losses.py    From QMLT with Apache License 2.0
def expectation(rho, operator):
    r""" Expectation value :math:`\tr\{ \rho O\}` of operator :math:`O` with respect to the quantum state :math:`\rho`.

    The inputs and outputs are tensors of dtype float, and all computations support automatic differentiation.


    Args:
        rho (tf.Tensor) : 2-dimensional Hermitian tensor representing state :math:`\rho`.
        operator (tf.Tensor):  2-dimensional Hermitian tensor of the same dimensions and dtype as rho.

    Returns:
        tf.Tensor: Returns the scalar expectation value.

    """
    if rho.shape != operator.shape:
        raise ValueError("Cannot compute expectation value if rho and operator have"
                         " different shapes {} and {}".format(rho.shape, operator.shape))
    if len(rho.shape) != 2:
        raise ValueError("Expectation loss expects a 2-d array representing a density matrix.")

    exp = tf.real(tf.trace(tf.matmul(rho, operator)))
    return exp 
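Usage sketch (values illustrative): the Pauli-Z expectation of a diagonal mixed state.

rho = tf.constant([[0.75, 0.], [0., 0.25]])  # 75% |0>, 25% |1>
pauli_z = tf.constant([[1., 0.], [0., -1.]])
e = expectation(rho, pauli_z)
# tr(rho Z) = 0.75 - 0.25 = 0.5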
Example #7
Source File: tf_image.py    From burst-denoising with Apache License 2.0
def hdrplus_merge(imgs, N, c, sig):
    ccast_tf = lambda x : tf.complex(x, tf.zeros_like(x))

    # imgs is [batch, h, w, ch]
    rcw = tf.expand_dims(rcwindow(N), axis=-1)
    imgs = imgs * rcw
    imgs = tf.transpose(imgs, [0, 3, 1, 2])
    imgs_f = tf.fft2d(ccast_tf(imgs))
    imgs_f = tf.transpose(imgs_f, [0, 2, 3, 1])
    Dz2 = tf.square(tf.abs(imgs_f[...,0:1] - imgs_f))
    Az = Dz2 / (Dz2 + c*sig**2)
    filt0 = 1 + tf.expand_dims(tf.reduce_sum(Az[...,1:], axis=-1), axis=-1)
    filts = tf.concat([filt0, 1 - Az[...,1:]], axis=-1)
    output_f = tf.reduce_mean(imgs_f * ccast_tf(filts), axis=-1)
    output_f = tf.real(tf.ifft2d(output_f))

    return output_f 
Example #8
Source File: soundrep.py    From multisensory with Apache License 2.0
def griffin_lim(spec, 
                frame_length,
                frame_step,
                num_fft,
                num_iters = 1):
                #num_iters = 20):
                #num_iters = 10):
  invert_spec = lambda spec : tf.contrib.signal.inverse_stft(spec, frame_length, frame_step, num_fft)

  spec_mag = tf.cast(tf.abs(spec), dtype=tf.complex64)
  best = tf.identity(spec)
  for i in range(num_iters):
    samples = invert_spec(best)
    est = tf.contrib.signal.stft(samples, frame_length, frame_step, num_fft, pad_end = False)  # (1, T, n_fft/2+1)
    phase = est / tf.cast(tf.maximum(1e-8, tf.abs(est)), tf.complex64) 
    best = spec_mag * phase
  X_t = invert_spec(best)
  y = tf.real(X_t)
  y = cast_float(y)
  return y 
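A hedged usage sketch (parameters illustrative; in practice `spec` would be a predicted magnitude spectrogram rather than an STFT of real audio, and griffin_lim also calls the module's cast_float helper, not shown here):

audio = tf.random_normal([1, 16000])
spec = tf.contrib.signal.stft(audio, frame_length=400, frame_step=160,
                              fft_length=512)
samples = griffin_lim(spec, frame_length=400, frame_step=160, num_fft=512,
                      num_iters=20)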
Example #9
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def _compareGradient(self, x):
    # x[:, 0] is real, x[:, 1] is imag.  We combine real and imag into
    # complex numbers. Then, we extract real and imag parts and
    # computes the squared sum. This is obviously the same as sum(real
    # * real) + sum(imag * imag). We just want to make sure the
    # gradient function is checked.
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      real, imag = tf.split(1, 2, inx)
      real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
      cplx = tf.complex(real, imag)
      cplx = tf.conj(cplx)
      loss = tf.reduce_sum(
          tf.square(tf.real(cplx))) + tf.reduce_sum(
              tf.square(tf.imag(cplx)))
      epsilon = 1e-3
      jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                  list(x.shape),
                                                  loss,
                                                  [1],
                                                  x_init_value=x,
                                                  delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon) 
Example #10
Source File: tfutil.py    From multisensory with Apache License 2.0
def angle(z):
  # from https://github.com/tensorflow/tensorflow/issues/483
  """
  Returns the elementwise arctan of z, choosing the quadrant correctly.

  Quadrant I: arctan(y/x)
  Quadrant II: \pi + arctan(y/x) (the phase of x<0, y=0 is \pi)
  Quadrant III: -\pi + arctan(y/x)
  Quadrant IV: arctan(y/x)

  Inputs:
      z: tf.complex64 or tf.complex128 tensor
  Returns:
      Angle of z
  """
  return tf.atan2(tf.imag(z), tf.real(z))
  # if z.dtype == tf.complex128:
  #     dtype = tf.float64
  # else:
  #     dtype = tf.float32
  # x = tf.real(z)
  # y = tf.imag(z)
  # xneg = tf.cast(x < 0.0, dtype)
  # yneg = tf.cast(y < 0.0, dtype)
  # ypos = tf.cast(y >= 0.0, dtype)

  # offset = xneg * (ypos - yneg) * np.pi

  # return tf.atan(y / x) + offset 
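Sanity check (values illustrative). Recent TensorFlow releases ship this same computation as the built-in tf.math.angle.

z = tf.complex(-1.0, 0.0)
theta = angle(z)  # pi: x < 0, y = 0 falls in quadrant II, as documented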
Example #11
Source File: tfmri_test.py    From dl-cs with MIT License
def test_channels_to_complex(self):
        data = tf.random_uniform([2, 10, 10, 2])
        data_complex = tfmri.channels_to_complex(data)
        diff_r = np.real(data_complex) - data[..., 0:1]
        diff_i = np.imag(data_complex) - data[..., 1:]
        diff = np.mean(diff_r ** 2 + diff_i ** 2)
        self.assertTrue(diff < eps)

        data_complex = tfmri.channels_to_complex(
            data, data_format='channels_first')
        diff_r = np.real(data_complex) - data[:, 0:5, ...]
        diff_i = np.imag(data_complex) - data[:, 5:, ...]
        diff = np.mean(diff_r ** 2 + diff_i ** 2)
        self.assertTrue(diff < eps)

        data = tf.random_uniform([10, 10, 2])
        data_complex = tfmri.channels_to_complex(
            data, data_format='channels_first')
        diff_r = np.real(data_complex) - data[0:5, ...]
        diff_i = np.imag(data_complex) - data[5:, ...]
        diff = np.mean(diff_r ** 2 + diff_i ** 2)
        self.assertTrue(diff < eps)

        with self.assertRaises(TypeError):
            # Not enough dimensions
            tfmri.channels_to_complex(tf.random_uniform([10, 10]))
        with self.assertRaises(TypeError):
            # Too many dimensions
            tfmri.channels_to_complex(tf.random_uniform([10, 10, 1, 1, 1]))
        with self.assertRaises(TypeError):
            tfmri.channels_to_complex(tf.random_uniform([10, 10, 1]))
        with self.assertRaises(TypeError):
            tfmri.channels_to_complex(
                tf.random_uniform([5, 10, 1]), data_format='channels_first')
        with self.assertRaises(TypeError):
            tfmri.channels_to_complex(
                tf.random_uniform([1, 5, 10, 1]), data_format='channels_first') 
Example #12
Source File: tfmri.py    From dl-cs with MIT License
def complex_to_channels(image,
                        data_format='channels_last',
                        name='complex2channels'):
    """Convert data from complex to channels."""
    if len(image.shape) != 3 and len(image.shape) != 4:
        raise TypeError('Input data must have 3 or 4 dimensions')

    axis_c = -1 if data_format == 'channels_last' else -3

    if image.dtype is not tf.complex64 and image.dtype is not tf.complex128:
        raise TypeError('Input data must be complex')

    with tf.name_scope(name):
        image_out = tf.concat((tf.real(image), tf.imag(image)), axis_c)
    return image_out 
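Usage sketch: this is the inverse of tfmri.channels_to_complex, exercised in Example #11 above.

image = tf.complex(tf.random_uniform([2, 10, 10, 1]),
                   tf.random_uniform([2, 10, 10, 1]))
channels = complex_to_channels(image)  # [2, 10, 10, 2]: real part, then imaginary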
Example #13
Source File: HolE.py    From KagNet with MIT License
def _ccorr(self, a, b):
		a = tf.cast(a, tf.complex64)
		b = tf.cast(b, tf.complex64)
		return tf.real(tf.ifft(tf.conj(tf.fft(a)) * tf.fft(b))) 
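The identity used here is the circular cross-correlation theorem. A minimal NumPy check of the math (illustrative, not part of the source):

import numpy as np

a = np.random.randn(8)
b = np.random.randn(8)
via_fft = np.real(np.fft.ifft(np.conj(np.fft.fft(a)) * np.fft.fft(b)))
direct = np.array([np.dot(a, np.roll(b, -k)) for k in range(8)])
# np.allclose(via_fft, direct) -> True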
Example #14
Source File: models.py    From spherical-cnn with MIT License
def init_sphcnn(args):
    method = args.transform_method
    real = args.real_inputs
    if method == 'naive':
        fun = lambda *args, **kwargs: spherical.sph_harm_all(*args, **kwargs, real=real)

    with tf.name_scope('harmonics_or_legendre'):
        res = args.input_res
        harmonics = [fun(res // (2**i), as_tfvar=True) for i in range(sum(args.pool_layers) + 1)]

    return harmonics 
Example #15
Source File: utils.py    From joie-kdd19 with MIT License
def circular_correlation(h, t):
    return tf.real(tf.spectral.ifft(tf.multiply(tf.conj(tf.spectral.fft(tf.complex(h, 0.))), tf.spectral.fft(tf.complex(t, 0.))))) 
Example #16
Source File: model.py    From DeepMRI with GNU General Public License v3.0
def complex2real(x):
    x_real = tf.real(x)
    x_imag = tf.imag(x)
    return tf.concat([x_real,x_imag], axis=-1) 
Example #17
Source File: model.py    From DeepMRI with GNU General Public License v3.0
def dc(generated, X_k, mask):
    gene_complex = real2complex(generated)
    gene_complex = tf.transpose(gene_complex,[0, 3, 1, 2])
    mask = tf.transpose(mask,[0, 3, 1, 2])
    X_k = tf.transpose(X_k,[0, 3, 1, 2])
    gene_fft = tf.fft2d(gene_complex)
    out_fft = X_k + gene_fft * (1.0 - mask)
    output_complex = tf.ifft2d(out_fft)
    output_complex = tf.transpose(output_complex, [0, 2, 3, 1])
    output_real = tf.cast(tf.real(output_complex), dtype=tf.float32)
    output_imag = tf.cast(tf.imag(output_complex), dtype=tf.float32)
    output = tf.concat([output_real,output_imag], axis=-1)
    return output 
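real2complex is not shown in this excerpt; below is a minimal sketch consistent with complex2real above (an assumption about the helper, not the project's exact code). The data-consistency step then keeps the measured k-space X_k wherever mask == 1 and uses the network's prediction only at unmeasured locations.

def real2complex(x):
    # Assumed inverse of complex2real: the last axis holds [real..., imag...] halves.
    channels = x.get_shape().as_list()[-1] // 2
    return tf.complex(x[..., :channels], x[..., channels:])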
Example #18
Source File: preprocessors.py    From TC-ResNet with Apache License 2.0
def _log_mel_spectrogram(self, audio, window_size_samples, window_stride_samples,
                             magnitude_squared, **kwargs):
        # only accept single channels
        audio = tf.squeeze(audio, -1)
        stfts = tf.contrib.signal.stft(audio,
                                       frame_length=window_size_samples,
                                       frame_step=window_stride_samples)

        # If magnitude_squared is True, use power spectrograms: tf.real(stfts * tf.conj(stfts)).
        # If magnitude_squared is False, use magnitude spectrograms: tf.abs(stfts).
        if magnitude_squared:
            spectrograms = tf.real(stfts * tf.conj(stfts))
        else:
            spectrograms = tf.abs(stfts)

        num_spectrogram_bins = spectrograms.shape[-1].value
        linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(
            kwargs["num_mel_bins"],
            num_spectrogram_bins,
            kwargs["sample_rate"],
            kwargs["lower_edge_hertz"],
            kwargs["upper_edge_hertz"],
        )

        mel_spectrograms = tf.tensordot(spectrograms, linear_to_mel_weight_matrix, 1)
        mel_spectrograms.set_shape(
            spectrograms.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:])
        )

        log_offset = 1e-6
        log_mel_spectrograms = tf.log(mel_spectrograms + log_offset)

        return log_mel_spectrograms 
Example #19
Source File: train_mri_vn.py    From mri-variationalnetwork with MIT License
def mriForwardOpWithOS(self, u, coil_sens, sampling_mask):
        with tf.variable_scope('mriForwardOp'):
            # add frequency encoding oversampling
            pad_u = tf.cast(tf.multiply(tf.cast(tf.shape(sampling_mask)[1], tf.float32), 0.25) + 1, tf.int32)
            pad_l = tf.cast(tf.multiply(tf.cast(tf.shape(sampling_mask)[1], tf.float32), 0.25) - 1, tf.int32)
            u_pad = tf.pad(u, [[0, 0], [pad_u, pad_l], [0, 0]])
            u_pad = tf.expand_dims(u_pad, axis=1)
            # apply sensitivites
            coil_imgs = u_pad * coil_sens
            # centered Fourier transform
            Fu = tf.contrib.icg.fftc2d(coil_imgs)
            # apply sampling mask
            mask = tf.expand_dims(sampling_mask, axis=1)
            kspace = tf.complex(tf.real(Fu) * mask, tf.imag(Fu) * mask)
        return kspace 
Example #20
Source File: HolE.py    From CPL with MIT License
def _ccorr(self, a, b):
		a = tf.cast(a, tf.complex64)
		b = tf.cast(b, tf.complex64)
		return tf.real(tf.ifft(tf.conj(tf.fft(a)) * tf.fft(b))) 
Example #21
Source File: scattering.py    From DeepLearningImplementations with MIT License
def stack_real_imag(x):

    stack_axis = len(x.get_shape().as_list())
    return tf.stack((tf.real(x), tf.imag(x)), axis=stack_axis) 
Example #22
Source File: scattering.py    From DeepLearningImplementations with MIT License
def _prepare_padding_size(self, s):
        M = s[-2]
        N = s[-1]

        self.M_padded = ((M + 2 ** (self.J)) // 2**self.J + 1) * 2**self.J
        self.N_padded = ((N + 2 ** (self.J)) // 2**self.J + 1) * 2**self.J

        s[-2] = self.M_padded
        s[-1] = self.N_padded
        self.padded_size_batch = [a for a in s]

    # This function copies the real tensor and views it as complex.
Example #23
Source File: models.py    From DeepLearningImplementations with MIT License
def _prepare_padding_size(self, s):
        M = s[-2]
        N = s[-1]

        self.M_padded = ((M + 2 ** (self.J)) // 2**self.J + 1) * 2**self.J
        self.N_padded = ((N + 2 ** (self.J)) // 2**self.J + 1) * 2**self.J

        s[-2] = self.M_padded
        s[-1] = self.N_padded
        self.padded_size_batch = [a for a in s]

    # This function copies the real tensor and views it as complex.
Example #24
Source File: models.py    From DeepLearningImplementations with MIT License
def stack_real_imag(x):

    stack_axis = len(x.get_shape().as_list())
    return tf.stack((tf.real(x), tf.imag(x)), axis=stack_axis) 
Example #25
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def testRealImag128(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
    cplx = real + 1j * imag
    self._compareRealImag(cplx, use_gpu=False)
    self._compareRealImag(cplx, use_gpu=True) 
Example #26
Source File: reversible_layers.py    From BERT with Apache License 2.0
def __init__(self, layer, temperature, **kwargs):
    """Constructs flow.

    Args:
      layer: Two-headed masked network taking the inputs and returning a
        real-valued Tensor of shape `[..., length, 2*vocab_size]`.
        Alternatively, `layer` may return a Tensor of shape
        `[..., length, vocab_size]` to be used as the location transform; the
        scale transform will be hard-coded to 1.
      temperature: Positive value determining bias of gradient estimator.
      **kwargs: kwargs of parent class.
    """
    super(DiscreteAutoregressiveFlow, self).__init__(**kwargs)
    self.layer = layer
    self.temperature = temperature 
Example #27
Source File: reversible_layers.py    From BERT with Apache License 2.0
def __init__(self, layer, temperature, **kwargs):
    """Constructs flow.

    Args:
      layer: Masked network taking inputs with shape `[..., length, vocab_size]`
        and returning a real-valued Tensor of shape
        `[..., length, vocab_size ** 2]`. Sinkhorn iterations are applied to
        each `layer` output to produce permutation matrices.
      temperature: Positive value determining bias of gradient estimator.
      **kwargs: kwargs of parent class.
    """
    super(SinkhornAutoregressiveFlow, self).__init__(**kwargs)
    self.layer = layer
    self.temperature = temperature 
Example #28
Source File: ops.py    From tfdeploy with MIT License
def test_Real(self):
        t = tf.real(tf.Variable(self.random(3, 4, complex=True)))
        self.check(t)


    #
    # Fourier transform ops
    # 
Example #29
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, inputx):
        
        assert inputx.dtype in [tf.complex64, tf.complex128], 'inputx is not complex.'
        
        return tf.concat([tf.real(inputx), tf.imag(inputx)], -1) 
Example #30
Source File: BP_Decoder.py    From Iterative-BP-CNN with GNU General Public License v3.0
def one_bp_iteration(self, xe_v2c_pre_iter, H_sumC_to_V, H_sumV_to_C, xe_0):
        xe_tanh = tf.tanh(tf.to_double(tf.truediv(xe_v2c_pre_iter, [2.0])))
        xe_tanh = tf.to_float(xe_tanh)
        xe_tanh_temp = tf.sign(xe_tanh)
        xe_sum_log_img = tf.matmul(H_sumC_to_V, tf.multiply(tf.truediv((1 - xe_tanh_temp), [2.0]), [3.1415926]))
        xe_sum_log_real = tf.matmul(H_sumC_to_V, tf.log(1e-8 + tf.abs(xe_tanh)))
        xe_sum_log_complex = tf.complex(xe_sum_log_real, xe_sum_log_img)
        xe_product = tf.real(tf.exp(xe_sum_log_complex))
        xe_product_temp = tf.multiply(tf.sign(xe_product), -2e-7)
        xe_pd_modified = tf.add(xe_product, xe_product_temp)
        xe_v_sumc = tf.multiply(self.atanh(xe_pd_modified), [2.0])
        xe_c_sumv = tf.add(xe_0, tf.matmul(H_sumV_to_C, xe_v_sumc))
        return xe_v_sumc, xe_c_sumv
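The tf.real(tf.exp(...)) line computes a product of signed values in the log domain: log-magnitudes accumulate in the real part and a phase of pi per negative factor in the imaginary part, so exponentiating and taking the real part restores the signed product. A minimal NumPy sketch of the identity (illustrative):

import numpy as np

x = np.array([0.5, -0.25, -0.8])
log_real = np.sum(np.log(np.abs(x)))
log_imag = np.pi * np.sum((1 - np.sign(x)) / 2)  # pi per negative factor
product = np.real(np.exp(log_real + 1j * log_imag))
# product == 0.5 * -0.25 * -0.8 == 0.1, up to floating point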