Python tensorflow.fft2d() Examples
The following are 9 code examples of tensorflow.fft2d().
Each example comes from an open-source project; the source file and license are noted above each snippet.
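All of the examples below use the TensorFlow 1.x API, where tf.fft2d() lives at the top level (in TensorFlow 2.x the same op is exposed as tf.signal.fft2d()). As a quick orientation, here is a minimal sketch of the call: the input must already be complex (complex64 or complex128), and the transform is applied over the two innermost dimensions.

import numpy as np
import tensorflow as tf  # TensorFlow 1.x

# A small batch of 2-D signals: shape [batch, height, width].
x_np = np.random.rand(2, 8, 8).astype(np.float32)
x = tf.complex(tf.constant(x_np), tf.zeros_like(tf.constant(x_np)))  # complex64

x_f = tf.fft2d(x)  # 2-D FFT over the (height, width) dims, batched over axis 0

with tf.Session() as sess:
    print(sess.run(x_f).shape)  # (2, 8, 8), dtype complex64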
Example #1
Source File: layers.py From neuron with GNU General Public License v3.0
def call(self, inputx):
    if not inputx.dtype in [tf.complex64, tf.complex128]:
        print('Warning: inputx is not complex. Converting.', file=sys.stderr)

        # if inputx is float, this will assume 0 imag channel
        inputx = tf.cast(inputx, tf.complex64)

    # get the right fft
    if self.ndims == 1:
        fft = tf.fft
    elif self.ndims == 2:
        fft = tf.fft2d
    else:
        fft = tf.fft3d

    perm_dims = [0, self.ndims + 1] + list(range(1, self.ndims + 1))
    invert_perm_ndims = [0] + list(range(2, self.ndims + 2)) + [1]

    perm_inputx = K.permute_dimensions(inputx, perm_dims)  # [batch_size, nb_features, *vol_size]
    fft_inputx = fft(perm_inputx)
    return K.permute_dimensions(fft_inputx, invert_perm_ndims)
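tf.fft2d (like tf.fft and tf.fft3d) transforms the innermost dimensions, which is why this layer permutes the channel axis next to the batch axis before the transform and permutes back afterwards. Below is a standalone sketch of the same pattern for the 2-D, channels-last case; the function name is hypothetical and not part of the neuron package.

import tensorflow as tf  # TensorFlow 1.x

def fft2_channels_last(x):
    """2-D FFT over the spatial dims of a [batch, height, width, channels] tensor."""
    if x.dtype not in (tf.complex64, tf.complex128):
        x = tf.cast(x, tf.complex64)       # real input: assume zero imaginary part
    x = tf.transpose(x, [0, 3, 1, 2])      # -> [batch, channels, height, width]
    x = tf.fft2d(x)                        # transforms the innermost (height, width) dims
    return tf.transpose(x, [0, 2, 3, 1])   # back to [batch, height, width, channels]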
Example #2
Source File: tf_image.py From burst-denoising with Apache License 2.0
def hdrplus_merge(imgs, N, c, sig):
    ccast_tf = lambda x: tf.complex(x, tf.zeros_like(x))

    # imgs is [batch, h, w, ch]
    rcw = tf.expand_dims(rcwindow(N), axis=-1)
    imgs = imgs * rcw
    imgs = tf.transpose(imgs, [0, 3, 1, 2])
    imgs_f = tf.fft2d(ccast_tf(imgs))
    imgs_f = tf.transpose(imgs_f, [0, 2, 3, 1])

    Dz2 = tf.square(tf.abs(imgs_f[..., 0:1] - imgs_f))
    Az = Dz2 / (Dz2 + c * sig**2)
    filt0 = 1 + tf.expand_dims(tf.reduce_sum(Az[..., 1:], axis=-1), axis=-1)
    filts = tf.concat([filt0, 1 - Az[..., 1:]], axis=-1)
    output_f = tf.reduce_mean(imgs_f * ccast_tf(filts), axis=-1)
    output_f = tf.real(tf.ifft2d(output_f))

    return output_f
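The Wiener-style weights Az = Dz2 / (Dz2 + c*sig**2) downweight frames whose Fourier coefficients differ strongly from the reference frame, which is the core of the HDR+ frequency-domain merge. The helper rcwindow is defined elsewhere in the project; a plausible stand-in is a 2-D raised-cosine (Hann-like) taper, sketched below under that assumption.

import numpy as np
import tensorflow as tf

def rcwindow_np(N):
    """2-D raised-cosine window of size N x N (a guess at the helper's behavior)."""
    n = np.arange(N, dtype=np.float32)
    w1d = 0.5 - 0.5 * np.cos(2.0 * np.pi * (n + 0.5) / N)   # periodic Hann-style taper
    w2d = np.outer(w1d, w1d).astype(np.float32)
    return tf.constant(w2d)  # shape [N, N]; the caller adds the channel axis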
Example #3
Source File: scattering.py From DeepLearningImplementations with MIT License
def compute_fft(x, direction="C2C", inverse=False):

    if direction == 'C2R':
        inverse = True

    x_shape = x.get_shape().as_list()
    h, w = x_shape[-2], x_shape[-3]

    x_complex = tf.complex(x[..., 0], x[..., 1])

    if direction == 'C2R':
        out = tf.real(tf.ifft2d(x_complex)) * h * w
        return out

    else:
        if inverse:
            out = stack_real_imag(tf.ifft2d(x_complex)) * h * w
        else:
            out = stack_real_imag(tf.fft2d(x_complex))
        return out
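The helper stack_real_imag is not shown on this page; a minimal sketch of what it presumably does (splitting a complex tensor back into a trailing real/imaginary axis) is:

import tensorflow as tf

def stack_real_imag(x):
    """Stack real and imaginary parts along a new trailing axis (assumed behavior)."""
    return tf.stack([tf.real(x), tf.imag(x)], axis=-1)

The * h * w factor compensates for the 1/(h*w) normalization that tf.ifft2d applies, so the 'C2R' and inverse paths return an unnormalized inverse transform.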
Example #4
Source File: models.py From DeepLearningImplementations with MIT License
def compute_fft(x, direction="C2C", inverse=False):

    if direction == 'C2R':
        inverse = True

    x_shape = x.get_shape().as_list()
    h, w = x_shape[-2], x_shape[-3]

    x_complex = tf.complex(x[..., 0], x[..., 1])

    if direction == 'C2R':
        out = tf.real(tf.ifft2d(x_complex)) * h * w
        return out

    else:
        if inverse:
            out = stack_real_imag(tf.ifft2d(x_complex)) * h * w
        else:
            out = stack_real_imag(tf.fft2d(x_complex))
        return out
Example #5
Source File: ops.py From tfdeploy with MIT License
def test_FFT2D(self):
    # only defined for gpu
    if DEVICE == GPU:
        t = tf.fft2d(self.random(3, 4, complex=True))
        self.check(t)
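Here self.random and self.check are tfdeploy test-harness helpers, and the "only defined for gpu" comment reflects that early TensorFlow releases shipped only GPU kernels for the FFT ops (later 1.x versions added CPU support). A rough standalone equivalent of what this test exercises, not using the tfdeploy harness, would compare tf.fft2d against NumPy:

import numpy as np
import tensorflow as tf  # TensorFlow 1.x

x_np = (np.random.rand(3, 4) + 1j * np.random.rand(3, 4)).astype(np.complex64)

with tf.Session() as sess:
    tf_out = sess.run(tf.fft2d(tf.constant(x_np)))

np.testing.assert_allclose(tf_out, np.fft.fft2(x_np).astype(np.complex64),
                           rtol=1e-4, atol=1e-4)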
Example #6
Source File: fft_ops_test.py From deep_image_model with Apache License 2.0
def _tfFFTForRank(self, rank):
    if rank == 1:
        return tf.fft
    elif rank == 2:
        return tf.fft2d
    elif rank == 3:
        return tf.fft3d
    else:
        raise ValueError("invalid rank")
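For reference, the matching inverse-transform selector follows the same pattern; the sketch below is illustrative (the original test file presumably has its own version):

import tensorflow as tf  # TensorFlow 1.x

def _tf_ifft_for_rank(rank):
    """Inverse counterpart of the helper above (illustrative sketch)."""
    if rank == 1:
        return tf.ifft
    elif rank == 2:
        return tf.ifft2d
    elif rank == 3:
        return tf.ifft3d
    else:
        raise ValueError("invalid rank")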
Example #7
Source File: tfmri.py From dl-cs with MIT License
def fft2c(im, data_format='channels_last', orthonorm=True, transpose=False,
          name='fft2c'):
    """Centered FFT2 on last two non-channel dimensions."""
    with tf.name_scope(name):
        im_out = im
        if data_format == 'channels_last':
            permute_orig = np.arange(len(im.shape))
            permute = permute_orig.copy()
            permute[-3] = permute_orig[-1]
            permute[-2:] = permute_orig[-3:-1]
            im_out = tf.transpose(im_out, permute)

        if orthonorm:
            fftscale = tf.sqrt(
                tf.cast(im_out.shape[-1], tf.float32) * tf.cast(
                    im_out.shape[-2], tf.float32))
        else:
            fftscale = 1.0
        fftscale = tf.cast(fftscale, dtype=tf.complex64)

        im_out = fftshift(im_out, axis=(-2, -1))
        if transpose:
            im_out = tf.ifft2d(im_out) * fftscale
        else:
            im_out = tf.fft2d(im_out) / fftscale
        im_out = fftshift(im_out, axis=(-2, -1))

        if data_format == 'channels_last':
            permute[-3:-1] = permute_orig[-2:]
            permute[-1] = permute_orig[-3]
            im_out = tf.transpose(im_out, permute)

    return im_out
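Note that fftshift here is a dl-cs project helper, not a TensorFlow op. A minimal sketch of a centering shift along the last two axes, assuming even-sized dimensions, could be built on tf.manip.roll (exposed as tf.roll in newer releases):

import tensorflow as tf

def fftshift(x, axis=(-2, -1)):
    """Circularly shift x by half its length along the given axes (sketch; assumes even sizes)."""
    rank = len(x.shape)
    axes = [ax % rank for ax in axis]           # convert negative axes to positive indices
    shifts = [tf.shape(x)[ax] // 2 for ax in axes]
    return tf.manip.roll(x, shift=shifts, axis=axes)

For odd-sized dimensions fftshift and ifftshift differ by one sample, so a full implementation would distinguish the forward and inverse shifts.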
Example #8
Source File: model.py From DeepMRI with GNU General Public License v3.0
def dc(generated, X_k, mask):
    gene_complex = real2complex(generated)
    gene_complex = tf.transpose(gene_complex, [0, 3, 1, 2])
    mask = tf.transpose(mask, [0, 3, 1, 2])
    X_k = tf.transpose(X_k, [0, 3, 1, 2])
    gene_fft = tf.fft2d(gene_complex)
    out_fft = X_k + gene_fft * (1.0 - mask)
    output_complex = tf.ifft2d(out_fft)
    output_complex = tf.transpose(output_complex, [0, 2, 3, 1])
    output_real = tf.cast(tf.real(output_complex), dtype=tf.float32)
    output_imag = tf.cast(tf.imag(output_complex), dtype=tf.float32)
    output = tf.concat([output_real, output_imag], axis=-1)
    return output
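This is a k-space data-consistency step: measured samples X_k (where the mask is 1) are kept as-is, and only unmeasured locations are filled from the FFT of the network output. The helpers real2complex and complex2real are defined elsewhere in DeepMRI; assuming the real and imaginary parts are stacked along the channel axis (an assumption about the project's layout), they could look like:

import tensorflow as tf

def real2complex(x):
    """[..., 2*c] float tensor -> [..., c] complex tensor (assumed channel layout)."""
    c = x.shape[-1].value // 2
    return tf.complex(x[..., :c], x[..., c:])

def complex2real(x):
    """[..., c] complex tensor -> [..., 2*c] float32 tensor."""
    return tf.concat([tf.real(x), tf.imag(x)], axis=-1)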
Example #9
Source File: data.py From DeepMRI with GNU General Public License v3.0
def setup_inputs(x, mask, batch_size):
    channel = x.shape[-1].value // 2
    mask = np.tile(mask, (channel, 1, 1))
    mask_tf = tf.cast(tf.constant(mask), tf.float32)
    mask_tf_c = tf.cast(mask_tf, tf.complex64)

    x_complex = real2complex(x)
    x_complex = tf.cast(x_complex, tf.complex64)
    x_complex = tf.transpose(x_complex, [2, 0, 1])
    kx = tf.fft2d(x_complex)
    kx_mask = kx * mask_tf_c
    x_u = tf.ifft2d(kx_mask)
    x_u = tf.transpose(x_u, [1, 2, 0])
    kx_mask = tf.transpose(kx_mask, [1, 2, 0])
    x_u_cat = complex2real(x_u)
    x_cat = tf.cast(x, tf.float32)
    mask_tf_c = tf.transpose(mask_tf_c, [1, 2, 0])

    features, labels, kx_mask, masks = tf.train.shuffle_batch(
        [x_u_cat, x_cat, kx_mask, mask_tf_c],
        batch_size=batch_size,
        num_threads=64,
        capacity=50,
        min_after_dequeue=10)

    return features, labels, kx_mask, masks
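tf.train.shuffle_batch belongs to the TF 1.x queue-based input pipeline, so the returned tensors only produce data once the queue runners are started. A minimal usage sketch, assuming x is a [height, width, 2*channels] tensor from an upstream reader and mask is a NumPy sampling mask (both assumptions, not shown on this page):

import tensorflow as tf  # TensorFlow 1.x

features, labels, kx_mask, masks = setup_inputs(x, mask, batch_size=4)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        batch_features, batch_labels = sess.run([features, labels])  # one shuffled batch
    finally:
        coord.request_stop()
        coord.join(threads)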