Python torch.blackman_window() Examples
The following are 6 code examples of torch.blackman_window(). Each example lists the project and source file it was taken from, so you can trace it back to the original code. You may also want to check out all available functions and classes of the torch module, or try the search function.
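Before the project examples, a minimal sketch of the function itself (illustrative only, not taken from any of the projects below): torch.blackman_window(window_length) returns a 1-D tensor containing the Blackman window, and the periodic flag chooses between the periodic form used for STFT frames and the symmetric form used for filter design.

import torch

# Periodic window (default): intended for FFT/STFT frames.
w_periodic = torch.blackman_window(400)                      # shape: (400,)

# Symmetric window: periodic=False, commonly used for filter design.
w_symmetric = torch.blackman_window(400, periodic=False)

# dtype (and device) can be set like any other factory function.
w_double = torch.blackman_window(400, dtype=torch.float64)

print(w_periodic.shape, w_symmetric.shape, w_double.dtype)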
Example #1
Source File: kaldi.py From audio with BSD 2-Clause "Simplified" License
def _feature_window_function(window_type: str,
                             window_size: int,
                             blackman_coeff: float,
                             device: torch.device,
                             dtype: int,
                             ) -> Tensor:
    r"""Returns a window function with the given type and size"""
    if window_type == HANNING:
        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype)
    elif window_type == HAMMING:
        return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46, device=device, dtype=dtype)
    elif window_type == POVEY:
        # like hanning but goes to zero at edges
        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype).pow(0.85)
    elif window_type == RECTANGULAR:
        return torch.ones(window_size, device=device, dtype=dtype)
    elif window_type == BLACKMAN:
        a = 2 * math.pi / (window_size - 1)
        window_function = torch.arange(window_size, device=device, dtype=dtype)
        # can't use torch.blackman_window as they use different coefficients
        return (blackman_coeff - 0.5 * torch.cos(a * window_function) +
                (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)).to(device=device, dtype=dtype)
    else:
        raise Exception('Invalid window type ' + window_type)
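The comment in the BLACKMAN branch is worth unpacking: torch.blackman_window hard-codes the classic coefficients (0.42, 0.5, 0.08), while Kaldi exposes the first coefficient as blackman_coeff. The check below is a hypothetical sketch (not part of torchaudio) showing that the two formulas agree for Kaldi's default blackman_coeff of 0.42 when the torch window is built with periodic=False.

import math
import torch

window_size = 400
blackman_coeff = 0.42  # Kaldi's default; other values diverge from torch's window

a = 2 * math.pi / (window_size - 1)
n = torch.arange(window_size, dtype=torch.float64)
kaldi_blackman = (blackman_coeff
                  - 0.5 * torch.cos(a * n)
                  + (0.5 - blackman_coeff) * torch.cos(2 * a * n))

torch_blackman = torch.blackman_window(window_size, periodic=False,
                                       dtype=torch.float64)

# With the default coefficient the two formulas coincide.
print(torch.allclose(kaldi_blackman, torch_blackman, atol=1e-10))  # True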
Example #2
Source File: modules.py From ddsp_pytorch with GNU General Public License v3.0
def get_window(name, window_length, squared=False):
    """
    Returns a windowing function.

    Arguments:
    ----------
    name (str)          : name of the window ('hann', 'hamming' or 'blackman')
    window_length (int) : length of the window
    squared (bool)      : if true, square the window

    Returns:
    ----------
    torch.FloatTensor   : window of size `window_length`
    """
    if name == "hann":
        window = torch.hann_window(window_length)
    elif name == "hamming":
        window = torch.hamming_window(window_length)
    elif name == "blackman":
        window = torch.blackman_window(window_length)
    else:
        raise ValueError("Invalid window name {}".format(name))
    if squared:
        window *= window
    return window
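A minimal usage sketch for get_window as defined above, feeding the result into an STFT. The stft call is illustrative and not part of ddsp_pytorch; return_complex=True targets the current torch.stft API.

import torch

window_length = 1024
window = get_window("blackman", window_length, squared=False)

# Typical use: hand the window tensor to torch.stft.
signal = torch.randn(1, 16000)
spec = torch.stft(signal, n_fft=window_length, hop_length=256,
                  window=window, return_complex=True)
print(spec.shape)  # (1, n_fft // 2 + 1, num_frames)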
Example #3
Source File: audio_preprocessing.py From NeMo with Apache License 2.0
def __init__(self, win_length, hop_length):
    super().__init__()
    self.win_length = win_length
    self.hop_length = hop_length
    self.disable_casts = self._opt_level == Optimization.mxprO1

    self.torch_windows = {
        'hann': torch.hann_window,
        'hamming': torch.hamming_window,
        'blackman': torch.blackman_window,
        'bartlett': torch.bartlett_window,
        'ones': torch.ones,
        None: torch.ones,
    }
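The dictionary above maps configuration strings to torch window factories, with both 'ones' and None falling back to torch.ones, i.e. a rectangular (no-op) window. A stand-alone sketch of how such a lookup is used (variable names here are illustrative, not NeMo's):

import torch

torch_windows = {
    'hann': torch.hann_window,
    'hamming': torch.hamming_window,
    'blackman': torch.blackman_window,
    'bartlett': torch.bartlett_window,
    'ones': torch.ones,
    None: torch.ones,
}

win_length = 320
window_fn = torch_windows.get('blackman', torch.ones)
window = window_fn(win_length)   # 1-D tensor of length win_length
print(window.shape)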
Example #4
Source File: features.py From training with Apache License 2.0
def __init__(self, sample_rate=8000, window_size=0.02, window_stride=0.01,
             n_fft=None, window="hamming", normalize="per_feature", log=True,
             center=True, dither=constant, pad_to=8, max_duration=16.7,
             frame_splicing=1):
    super(SpectrogramFeatures, self).__init__()
    torch_windows = {
        'hann': torch.hann_window,
        'hamming': torch.hamming_window,
        'blackman': torch.blackman_window,
        'bartlett': torch.bartlett_window,
        'none': None,
    }

    self.win_length = int(sample_rate * window_size)
    self.hop_length = int(sample_rate * window_stride)
    self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))

    window_fn = torch_windows.get(window, None)
    window_tensor = window_fn(self.win_length,
                              periodic=False) if window_fn else None
    self.window = window_tensor

    self.normalize = normalize
    self.log = log
    self.center = center
    self.dither = dither
    self.pad_to = pad_to
    self.frame_splicing = frame_splicing

    max_length = 1 + math.ceil(
        (max_duration * sample_rate - self.win_length) / self.hop_length
    )
    max_pad = 16 - (max_length % 16)
    self.max_length = max_length + max_pad
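To make the arithmetic concrete, with the default arguments (sample_rate=8000, window_size=0.02, window_stride=0.01, max_duration=16.7) the derived sizes work out as in this stand-alone sketch:

import math

sample_rate, window_size, window_stride, max_duration = 8000, 0.02, 0.01, 16.7

win_length = int(sample_rate * window_size)        # 160 samples per frame
hop_length = int(sample_rate * window_stride)      # 80 samples between frames
n_fft = 2 ** math.ceil(math.log2(win_length))      # 256, next power of two

max_length = 1 + math.ceil(
    (max_duration * sample_rate - win_length) / hop_length)  # 1669 frames
max_pad = 16 - (max_length % 16)                   # round frame count up to a multiple of 16
print(win_length, hop_length, n_fft, max_length + max_pad)   # 160 80 256 1680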
Example #5
Source File: features.py From inference with Apache License 2.0
def __init__(self, sample_rate=8000, window_size=0.02, window_stride=0.01,
             window="hamming", normalize="per_feature", n_fft=None,
             preemph=0.97, nfilt=64, lowfreq=0, highfreq=None, log=True,
             dither=constant, pad_to=8, max_duration=16.7, frame_splicing=1):
    super(FilterbankFeatures, self).__init__()
    # print("PADDING: {}".format(pad_to))

    torch_windows = {
        'hann': torch.hann_window,
        'hamming': torch.hamming_window,
        'blackman': torch.blackman_window,
        'bartlett': torch.bartlett_window,
        'none': None,
    }

    self.win_length = int(sample_rate * window_size)  # frame size
    self.hop_length = int(sample_rate * window_stride)
    self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))

    self.normalize = normalize
    self.log = log
    self.dither = dither
    self.frame_splicing = frame_splicing
    self.nfilt = nfilt
    self.preemph = preemph
    self.pad_to = pad_to
    # For now, always enable this.
    # See https://docs.google.com/presentation/d/1IVC3J-pHB-ipJpKsJox_SqmDHYdkIaoCXTbKmJmV2-I/edit?usp=sharing
    # for elaboration
    self.use_deterministic_dithering = True

    highfreq = highfreq or sample_rate / 2

    window_fn = torch_windows.get(window, None)
    window_tensor = window_fn(self.win_length,
                              periodic=False) if window_fn else None
    filterbanks = torch.tensor(
        librosa.filters.mel(sample_rate, self.n_fft, n_mels=nfilt,
                            fmin=lowfreq, fmax=highfreq),
        dtype=torch.float).unsqueeze(0)
    # self.fb = filterbanks
    # self.window = window_tensor
    self.register_buffer("fb", filterbanks)
    self.register_buffer("window", window_tensor)

    # Calculate maximum sequence length (# frames)
    max_length = 1 + math.ceil(
        (max_duration * sample_rate - self.win_length) / self.hop_length
    )
    max_pad = 16 - (max_length % 16)
    self.max_length = max_length + max_pad
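The __init__ above only builds the window and mel filterbank; the project's actual forward pass lives elsewhere in the file. The sketch below is a generic STFT-plus-filterbank pipeline under the same default sizes, not the project's forward() method, and it uses keyword arguments for librosa.filters.mel and return_complex=True for torch.stft to match current library APIs.

import math
import torch
import librosa

sample_rate, nfilt = 8000, 64
win_length = int(sample_rate * 0.02)                     # 160
hop_length = int(sample_rate * 0.01)                     # 80
n_fft = 2 ** math.ceil(math.log2(win_length))            # 256

window = torch.hamming_window(win_length, periodic=False)
fb = torch.tensor(librosa.filters.mel(sr=sample_rate, n_fft=n_fft, n_mels=nfilt,
                                      fmin=0, fmax=sample_rate / 2),
                  dtype=torch.float).unsqueeze(0)        # (1, nfilt, n_fft // 2 + 1)

x = torch.randn(1, 32000)                                # dummy waveform, batch of one
spec = torch.stft(x, n_fft=n_fft, hop_length=hop_length, win_length=win_length,
                  window=window, return_complex=True)
power = spec.abs().pow(2.0)                              # (1, n_fft // 2 + 1, frames)
log_mel = torch.log(torch.matmul(fb, power) + 1e-20)     # (1, nfilt, frames)
print(log_mel.shape)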
Example #6
Source File: features.py From training with Apache License 2.0
def __init__(self, sample_rate=8000, window_size=0.02, window_stride=0.01,
             window="hamming", normalize="per_feature", n_fft=None,
             preemph=0.97, nfilt=64, lowfreq=0, highfreq=None, log=True,
             dither=constant, pad_to=8, max_duration=16.7, frame_splicing=1):
    super(FilterbankFeatures, self).__init__()
    # print("PADDING: {}".format(pad_to))

    torch_windows = {
        'hann': torch.hann_window,
        'hamming': torch.hamming_window,
        'blackman': torch.blackman_window,
        'bartlett': torch.bartlett_window,
        'none': None,
    }

    self.win_length = int(sample_rate * window_size)  # frame size
    self.hop_length = int(sample_rate * window_stride)
    self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))

    self.normalize = normalize
    self.log = log
    self.dither = dither
    self.frame_splicing = frame_splicing
    self.nfilt = nfilt
    self.preemph = preemph
    self.pad_to = pad_to

    highfreq = highfreq or sample_rate / 2

    window_fn = torch_windows.get(window, None)
    window_tensor = window_fn(self.win_length,
                              periodic=False) if window_fn else None
    filterbanks = torch.tensor(
        librosa.filters.mel(sample_rate, self.n_fft, n_mels=nfilt,
                            fmin=lowfreq, fmax=highfreq),
        dtype=torch.float).unsqueeze(0)
    # self.fb = filterbanks
    # self.window = window_tensor
    self.register_buffer("fb", filterbanks)
    self.register_buffer("window", window_tensor)

    # Calculate maximum sequence length (# frames)
    max_length = 1 + math.ceil(
        (max_duration * sample_rate - self.win_length) / self.hop_length
    )
    max_pad = 16 - (max_length % 16)
    self.max_length = max_length + max_pad
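Both this and the previous example store the window and filterbank with register_buffer rather than as plain attributes (note the commented-out alternative). A buffer follows the module through .to()/.cuda() and is saved in state_dict, but is not a trainable parameter. The toy module below illustrates the difference; the class name and contents are illustrative, not from either project.

import torch
import torch.nn as nn

class WindowHolder(nn.Module):
    def __init__(self, win_length=160):
        super().__init__()
        # Registered buffer: follows .to(device/dtype), appears in state_dict,
        # but is not returned by parameters() and receives no gradients.
        self.register_buffer("window", torch.blackman_window(win_length))
        # Plain attribute: not converted by .to() and not saved in state_dict.
        self.plain_window = torch.blackman_window(win_length)

m = WindowHolder().to(torch.float64)
print("window" in m.state_dict())            # True
print("plain_window" in m.state_dict())      # False
print(m.window.dtype, m.plain_window.dtype)  # torch.float64 torch.float32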