Python scipy.io.wavfile.write() Examples
The following are 30 code examples of scipy.io.wavfile.write(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.io.wavfile, or try the search function.
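Before the project examples, here is a minimal, self-contained sketch of the basic call. It is not taken from any of the projects below; the file name, sample rate, and 440 Hz tone are illustrative values chosen for the example. scipy.io.wavfile.write(filename, rate, data) infers the output sample format from the dtype of data, so floating-point audio in [-1, 1] is commonly scaled and cast to int16 first to produce an ordinary 16-bit PCM file, as many of the examples below do.

import numpy as np
from scipy.io import wavfile

# Illustrative values: one second of a 440 Hz sine tone at 16 kHz.
rate = 16000
t = np.linspace(0.0, 1.0, rate, endpoint=False)
tone = 0.5 * np.sin(2 * np.pi * 440.0 * t)

# Scale [-1, 1] floats to 16-bit PCM before writing.
wavfile.write("tone.wav", rate, (tone * 32767).astype(np.int16))

# Read the file back to confirm that rate and dtype round-trip.
rate_read, data_read = wavfile.read("tone.wav")
assert rate_read == rate and data_read.dtype == np.int16

Writing the float array directly would also work (scipy then stores a 32-bit float WAV), but int16 output is the most widely compatible choice, which is why the scale-by-32767 pattern recurs throughout the examples below.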
Example #1
Source File: audio_tools.py From tools with BSD 3-Clause "New" or "Revised" License | 7 votes |
def run_phase_reconstruction_example():
    fs, d = fetch_sample_speech_tapestry()
    # actually gives however many components you say! So double what .m file
    # says
    fftsize = 512
    step = 64
    X_s = np.abs(stft(d, fftsize=fftsize, step=step, real=False,
                      compute_onesided=False))
    X_t = iterate_invert_spectrogram(X_s, fftsize, step, verbose=True)

    """
    import matplotlib.pyplot as plt
    plt.specgram(d, cmap="gray")
    plt.savefig("1.png")
    plt.close()

    plt.imshow(X_s, cmap="gray")
    plt.savefig("2.png")
    plt.close()
    """

    wavfile.write("phase_original.wav", fs, soundsc(d))
    wavfile.write("phase_reconstruction.wav", fs, soundsc(X_t))
Example #2
Source File: io.py From pydiogment with BSD 3-Clause "New" or "Revised" License | 7 votes |
def write_file(output_file_path, input_file_name, name_attribute, sig, fs):
    """
    Write an audio signal to a mono wave file.

    Args:
        - output_file_path (str)   : path to save the resulting wave file to.
        - input_file_name  (str)   : name of the processed wave file.
        - name_attribute   (str)   : attribute to add to the output file name.
        - sig              (array) : signal/audio array.
        - fs               (int)   : sampling rate.
    """
    # set up the output file name
    fname = os.path.basename(input_file_name).split(".wav")[0] + name_attribute
    fpath = os.path.join(output_file_path, fname)

    # write data to file
    write(filename=fpath, rate=fs, data=sig)
    print("Writing data to " + fpath + ".")
Example #3
Source File: audio_tools.py From tools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def run_ltsd_example():
    fs, d = fetch_sample_speech_tapestry()
    winsize = 1024
    d = d.astype("float32") / 2 ** 15
    d -= d.mean()

    pad = 3 * fs
    noise_pwr = np.percentile(d, 1) ** 2
    noise_pwr = max(1E-9, noise_pwr)
    d = np.concatenate((np.zeros((pad,)) + noise_pwr * np.random.randn(pad), d))
    _, vad_segments = ltsd_vad(d, fs, winsize=winsize)
    v_up = np.where(vad_segments == True)[0]
    s = v_up[0]
    st = v_up[-1] + int(.5 * fs)
    d = d[s:st]

    bname = "tapestry.wav".split(".")[0]
    wavfile.write("%s_out.wav" % bname, fs, soundsc(d))
Example #4
Source File: audio.py From blow with Apache License 2.0 | 6 votes |
def synthesize(frames, filename, stride, sr=16000, deemph=0, ymax=0.98, normalize=False):
    # Generate stream
    y = torch.zeros((len(frames) - 1) * stride + len(frames[0]))
    for i, x in enumerate(frames):
        y[i * stride:i * stride + len(x)] += x
    # To numpy & deemph
    y = y.numpy().astype(np.float32)
    if deemph > 0:
        y = deemphasis(y, alpha=deemph)
    # Normalize
    if normalize:
        y -= np.mean(y)
        mx = np.max(np.abs(y))
        if mx > 0:
            y *= ymax / mx
    else:
        y = np.clip(y, -ymax, ymax)
    # To 16 bit & save
    wavfile.write(filename, sr, np.array(y * 32767, dtype=np.int16))
    return y
Example #5
Source File: IOMethods.py From ASP with GNU General Public License v3.0 | 6 votes |
def sound(x, fs):
    """ Plays a wave file using the pyglet library. The audio is first written
        to a temporary file; playback is terminated by any keyboard input
        followed by Enter.
    Args:
        x  : (array) Floating point samples
        fs : (int)   The sampling rate
    """
    import pyglet as pg
    global player
    # Call the writing function
    AudioIO.wavWrite(x, fs, 16, 'testPlayback.wav')
    # Initialize playback engine
    player = pg.media.Player()
    # Initialize the object with the audio file
    playback = pg.media.load('testPlayback.wav')
    # Set it to player
    player.queue(playback)
    # Sound call
    player.play()
    # Killed by "keyboard"
    kill = raw_input()
    if kill or kill == '':
        AudioIO.stop()
    # Remove the dummy wave write
    os.remove('testPlayback.wav')
Example #6
Source File: analyzeMovieSound.py From pyAudioAnalysis with Apache License 2.0 | 6 votes |
def getMusicSegmentsFromFile(inputFile):
    modelType = "svm"
    modelName = "data/svmMovies8classes"

    dirOutput = inputFile[0:-4] + "_musicSegments"

    if os.path.exists(dirOutput) and dirOutput != ".":
        shutil.rmtree(dirOutput)
    os.makedirs(dirOutput)

    [Fs, x] = audioBasicIO.readAudioFile(inputFile)

    if modelType == 'svm':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep,
         compute_beat] = aT.load_model(modelName)
    elif modelType == 'knn':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep,
         compute_beat] = aT.load_model_knn(modelName)

    flagsInd, classNames, acc, CM = aS.mtFileClassification(inputFile, modelName, modelType,
                                                            plotResults=False, gtFile="")
    segs, classes = aS.flags2segs(flagsInd, mtStep)

    for i, s in enumerate(segs):
        if (classNames[int(classes[i])] == "Music") and (s[1] - s[0] >= minDuration):
            strOut = "{0:s}{1:.3f}-{2:.3f}.wav".format(dirOutput + os.sep, s[0], s[1])
            wavfile.write(strOut, Fs, x[int(Fs * s[0]):int(Fs * s[1])])
Example #7
Source File: beamformers.py From beamformers with MIT License | 6 votes |
def BeamformIt(mixture, fs=8000, basedir='/Data/software/BeamformIt/', verbose=False):
    mixture /= np.max(np.abs(mixture))

    if not os.path.exists('/tmp/audios/'):
        os.mkdir('/tmp/audios/')
    wavfile.write('/tmp/audios/rec.wav', fs, mixture.T)

    p = subprocess.Popen("cd {}; bash do_beamforming.sh /tmp/audios/ temps".format(basedir),
                         stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    p_status = p.wait()

    ref_ch = int(str(output).split("Selected channel ")[1].split(' as the reference channel')[0])

    if verbose:
        print("Output: {}".format(output))
        print("Error: {}".format(err))
        print("Status: {}".format(p_status))

    s, _ = sf.read('{}/output/temps/temps.wav'.format(basedir))

    return s, ref_ch
Example #8
Source File: audacityAnnotation2WAVs.py From pyAudioAnalysis with Apache License 2.0 | 6 votes |
def annotation2files(wavFile, csvFile):
    '''
    Break an audio stream to segments of interest, defined by a csv file
    - wavFile: path to input wavfile
    - csvFile: path to csvFile of segment limits
    Input CSV file must be of the format <T1>\t<T2>\t<Label>
    '''
    [Fs, x] = audioBasicIO.read_audio_file(wavFile)
    with open(csvFile, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter='\t', quotechar='|')
        for j, row in enumerate(reader):
            T1 = float(row[0].replace(",", "."))
            T2 = float(row[1].replace(",", "."))
            label = "%s_%s_%.2f_%.2f.wav" % (wavFile, row[2], T1, T2)
            label = label.replace(" ", "_")
            xtemp = x[int(round(T1 * Fs)):int(round(T2 * Fs))]
            print(T1, T2, label, xtemp.shape)
            wavfile.write(label, Fs, xtemp)
Example #9
Source File: IOMethods.py From ASP with GNU General Public License v3.0 | 6 votes |
def wavWrite(y, fs, nbits, audioFile):
    """ Write samples to WAV file
    Args:
        samples: (ndarray / 2D ndarray) (floating point) sample vector
                 mono: DIM: nSamples
                 stereo: DIM: nSamples x nChannels
        fs:      (int) Sample rate in Hz
        nBits:   (int) Number of bits
        fnWAV:   (string) WAV file name to write
    """
    if nbits == 8:
        intsamples = (y + 1.0) * AudioIO.normFact['int' + str(nbits)]
        fX = np.int8(intsamples)
    elif nbits == 16:
        intsamples = y * AudioIO.normFact['int' + str(nbits)]
        fX = np.int16(intsamples)
    elif nbits > 16:
        fX = y

    write(audioFile, fs, fX)
Example #10
Source File: build_audio_database_v2.py From speech_separation with MIT License | 6 votes |
def split_to_mix(audio_path_list, database_repo=DATABASE_REPO_PATH, partition=2):
    # return split_list : (part1,part2,...)
    # each part : (idx,path)
    length = len(audio_path_list)
    part_len = length // partition
    head = 0
    part_idx = 0
    split_list = []
    while (head + part_len) < length:
        part = audio_path_list[head:(head + part_len)]
        split_list.append(part)
        with open('%s/single_TF_part%d.txt' % (database_repo, part_idx), 'a') as f:
            for idx, _ in part:
                name = 'single-%05d' % idx
                f.write('%s.npy' % name)
                f.write('\n')
        head += part_len
        part_idx += 1
    return split_list

# mix single TF data
Example #11
Source File: sigsys.py From scikit-dsp-comm with BSD 2-Clause "Simplified" License | 6 votes |
def to_wav(filename, rate, x):
    """
    Write a wave file.

    A wrapper function for scipy.io.wavfile.write that also includes int16
    scaling and conversion. Assume input x is [-1,1] values.

    Parameters
    ----------
    filename : file name string
    rate : sampling frequency in Hz

    Returns
    -------
    Nothing : writes only the *.wav file

    Examples
    --------
    >>> to_wav('test_file.wav', 8000, x)
    """
    x16 = np.int16(x * 32767)
    wavfile.write(filename, rate, x16)
Example #12
Source File: io_methods.py From signaltrain with GNU General Public License v3.0 | 6 votes |
def wavWrite(y, fs, nbits, audioFile):
    """ Write samples to WAV file
    Args:
        samples: (ndarray / 2D ndarray) (floating point) sample vector
                 mono: DIM: nSamples
                 stereo: DIM: nSamples x nChannels
        fs:      (int) Sample rate in Hz
        nBits:   (int) Number of bits
        fnWAV:   (string) WAV file name to write
    """
    if nbits == 8:
        intsamples = (y + 1.0) * AudioIO.normFact['int' + str(nbits)]
        fX = np.int8(intsamples)
    elif nbits == 16:
        intsamples = y * AudioIO.normFact['int' + str(nbits)]
        fX = np.int16(intsamples)
    elif nbits > 16:
        fX = y

    write(audioFile, fs, fX)
Example #13
Source File: io_methods.py From signaltrain with GNU General Public License v3.0 | 6 votes |
def sound(x, fs):
    """ Plays a wave file using the pyglet library. The audio is first written
        to a temporary file; playback is terminated by any keyboard input
        followed by Enter.
    Args:
        x  : (array) Floating point samples
        fs : (int)   The sampling rate
    """
    import pyglet as pg
    global player
    # Call the writing function
    AudioIO.wavWrite(x, fs, 16, 'testPlayback.wav')
    # Initialize playback engine
    player = pg.media.Player()
    # Initialize the object with the audio file
    playback = pg.media.load('testPlayback.wav')
    # Set it to player
    player.queue(playback)
    # Sound call
    player.play()
    # Killed by "keyboard"
    kill = raw_input()
    if kill or kill == '':
        AudioIO.stop()
    # Remove the dummy wave write
    os.remove('testPlayback.wav')
Example #14
Source File: soundfile.py From opensauce-python with Apache License 2.0 | 6 votes |
def _wavdata_rs(self):
    if self.fs_rs is not None:
        # Number of points in resample
        ns_rs = np.int_(np.ceil(self.ns * self.fs_rs / self.fs))
        # Do resample
        # XXX: Tried using a Hamming window as a low pass filter, but it
        #      didn't seem to make a big difference, so it's not used here.
        data_rs = resample(self.wavdata, ns_rs)
        wavpath_rs = self.wavpath.split('.')[0] + '-resample-' + str(self.fs_rs) + 'Hz.wav'
        # Write resampled data to wav file
        # Convert data from 32-bit floating point to 16-bit PCM
        data_rs_int = np.int16(data_rs * 32768)
        wavfile.write(wavpath_rs, self.fs_rs, data_rs_int)
        # XXX: Was worried that Python might continue executing code
        #      before the file write is finished, but it seems like it's
        #      not an issue.
        return wavpath_rs, data_rs, data_rs_int, ns_rs
    else:
        return None, None, None, None
Example #15
Source File: audio_tools.py From tools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def run_fft_dct_example():
    random_state = np.random.RandomState(1999)

    fs, d = fetch_sample_speech_fruit()
    n_fft = 64
    X = d[0]
    X_stft = stft(X, n_fft)
    X_rr = complex_to_real_view(X_stft)
    X_dct = fftpack.dct(X_rr, axis=-1, norm='ortho')
    X_dct_sub = X_dct[1:] - X_dct[:-1]
    std = X_dct_sub.std(axis=0, keepdims=True)
    X_dct_sub += .01 * std * random_state.randn(
        X_dct_sub.shape[0], X_dct_sub.shape[1])
    X_dct_unsub = np.cumsum(X_dct_sub, axis=0)
    X_idct = fftpack.idct(X_dct_unsub, axis=-1, norm='ortho')
    X_irr = real_to_complex_view(X_idct)
    X_r = istft(X_irr, n_fft)[:len(X)]

    SNR = 20 * np.log10(np.linalg.norm(X - X_r) / np.linalg.norm(X))
    print(SNR)

    wavfile.write("fftdct_orig.wav", fs, soundsc(X))
    wavfile.write("fftdct_rec.wav", fs, soundsc(X_r))
Example #16
Source File: IOMethods.py From ASP with GNU General Public License v3.0 | 6 votes |
def wavWrite(y, fs, nbits, audioFile):
    """ Write samples to WAV file
    Args:
        samples: (ndarray / 2D ndarray) (floating point) sample vector
                 mono: DIM: nSamples
                 stereo: DIM: nSamples x nChannels
        fs:      (int) Sample rate in Hz
        nBits:   (int) Number of bits
        fnWAV:   (string) WAV file name to write
    """
    if nbits == 8:
        intsamples = (y + 1.0) * AudioIO.normFact['int' + str(nbits)]
        fX = np.int8(intsamples)
    elif nbits == 16:
        intsamples = y * AudioIO.normFact['int' + str(nbits)]
        fX = np.int16(intsamples)
    elif nbits > 16:
        fX = y

    write(audioFile, fs, fX)
Example #17
Source File: infer_wavenet.py From wavenet with Apache License 2.0 | 6 votes |
def generate_and_save_samples(sample_fn, length, count, dir, rate, levels):
    def save_samples(data):
        data = (data * np.reshape(np.arange(levels) / (levels - 1),
                                  [levels, 1, 1])).sum(axis=1, keepdims=True)
        value = np.iinfo(np.int16).max
        audio = (utils.inverse_mulaw(data * 2 - 1) * value).astype(np.int16)
        for idx, sample in enumerate(audio):
            filename = os.path.join(dir, 'sample_{}.wav'.format(idx))
            wavfile.write(filename, rate, np.squeeze(sample))

    samples = chainer.Variable(
        chainer.cuda.cupy.zeros([count, levels, 1, length], dtype='float32'))
    one_hot_ref = chainer.cuda.cupy.eye(levels).astype('float32')

    with tqdm.tqdm(total=length) as bar:
        for i in range(length):
            probs = F.softmax(sample_fn(samples))[:, :, 0, 0, i]
            samples.data[:, :, 0, i] = one_hot_ref[utils.sample_from(probs.data.get())]
            bar.update()

    samples.to_cpu()
    save_samples(samples.data)
Example #18
Source File: audioAnalysis.py From pyAudioAnalysis with Apache License 2.0 | 5 votes |
def silenceRemovalWrapper(inputFile, smoothingWindow, weight):
    if not os.path.isfile(inputFile):
        raise Exception("Input audio file not found!")

    [fs, x] = audioBasicIO.read_audio_file(inputFile)
    segmentLimits = aS.silence_removal(x, fs, 0.05, 0.05,
                                       smoothingWindow, weight, True)
    for i, s in enumerate(segmentLimits):
        strOut = "{0:s}_{1:.3f}-{2:.3f}.wav".format(inputFile[0:-4], s[0], s[1])
        wavfile.write(strOut, fs, x[int(fs * s[0]):int(fs * s[1])])
Example #19
Source File: common_joint.py From magenta with Apache License 2.0 | 5 votes |
def save_data(self, x, name, save_dir, x_is_real_x=False):
    """Save dataspace instances.

    Args:
      x: A numpy array of dataspace points.
      name: A string indicating the name in the saved file.
      save_dir: A string indicating the directory to put the saved file.
      x_is_real_x: A boolean indicating whether `x` is already in dataspace.
          If not, `x` is converted to dataspace before saving.
    """
    real_x = x if x_is_real_x else self.decode(x)
    real_x = real_x.reshape(-1)
    sample_file = os.path.join(save_dir, '%s.wav' % name)
    wavfile.write(sample_file, rate=16000, data=real_x)
Example #20
Source File: audio.py From ZeroSpeech-TTS-without-T with MIT License | 5 votes |
def save_wav(wav, path):
    wav *= 32767 / max(0.01, np.max(np.abs(wav)))
    wavfile.write(path, config.sample_rate, wav.astype(np.int16))
Example #21
Source File: audio.py From arabic-tacotron-tts with MIT License | 5 votes |
def save_wav(wav, path):
    wav *= 32767 / max(0.01, np.max(np.abs(wav)))
    # librosa.output.write_wav(path, wav.astype(np.int16), hparams.sample_rate)
    wavfile.write(path, hparams.sample_rate, wav.astype(np.int16))
Example #22
Source File: test_wavfile.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def _check_roundtrip(realfile, rate, dtype, channels):
    if realfile:
        fd, tmpfile = tempfile.mkstemp(suffix='.wav')
        os.close(fd)
    else:
        tmpfile = BytesIO()
    try:
        data = np.random.rand(100, channels)
        if channels == 1:
            data = data[:, 0]
        if dtype.kind == 'f':
            # The range of the float type should be in [-1, 1]
            data = data.astype(dtype)
        else:
            data = (data * 128).astype(dtype)

        wavfile.write(tmpfile, rate, data)

        for mmap in [False, True]:
            rate2, data2 = wavfile.read(tmpfile, mmap=mmap)

            assert_equal(rate, rate2)
            assert_(data2.dtype.byteorder in ('<', '=', '|'), msg=data2.dtype)
            assert_array_equal(data, data2)
            del data2
    finally:
        if realfile:
            os.unlink(tmpfile)
Example #23
Source File: IOMethods.py From ASP with GNU General Public License v3.0 | 5 votes |
def sound(x, fs):
    """ Plays a wave file using the pyglet library. The audio is first written
        to a temporary file.
    Args:
        x  : (array) Floating point samples
        fs : (int)   The sampling rate
    """
    import pyglet as pg
    # Call the writing function
    AudioIO.wavWrite(x, fs, 16, 'testPlayback.wav')
    # Initialize playback engine
    playback = pg.media.load('testPlayback.wav')
    # Sound call
    playback.play()
    # Remove the dummy wave write
    os.remove('testPlayback.wav')
Example #24
Source File: synthesizer.py From cdvae-vc with MIT License | 5 votes |
def world_synthesis(wav_name, feat_param, f0, ap, spectral, spectral_type):
    """WORLD SPEECH SYNTHESIS

    Args:
        wav_name (str): filename of synthesised wav
        feat_param (dict): acoustic feature parameter dictionary
        f0 (np array): pitch features
        ap: aperiodicity features
        spectral: spectral features
        spectral_type: spectral feature type (sp or mcc)
    """
    synthesizer = Synthesizer(fs=feat_param['fs'],
                              fftl=feat_param['fftl'],
                              shiftms=feat_param['shiftms'])

    if spectral_type == 'mcc':
        wav = synthesizer.synthesis(f0, spectral, ap,
                                    alpha=feat_param['mcep_alpha'])
    elif spectral_type == 'sp':
        wav = synthesizer.synthesis_spc(f0, spectral, ap)
    else:
        logging.info("Currently support 'mcep' or 'spc' only.")
        raise ValueError

    wav = np.clip(wav, -32768, 32767)
    wavfile.write(wav_name, feat_param['fs'], wav.astype(np.int16))
    logging.info("wrote %s." % (wav_name))
Example #25
Source File: fastgen.py From magenta with Apache License 2.0 | 5 votes |
def save_batch(batch_audio, batch_save_paths):
    for audio, name in zip(batch_audio, batch_save_paths):
        tf.logging.info("Saving: %s" % name)
        wavfile.write(name, 16000, audio)
Example #26
Source File: audio.py From Tacotron-2 with MIT License | 5 votes |
def save_wav(wav, path, sr):
    wav *= 32767 / max(0.01, np.max(np.abs(wav)))  # proposed by @dsmiller
    wavfile.write(path, sr, wav.astype(np.int16))
Example #27
Source File: audio.py From Tacotron-2 with MIT License | 5 votes |
def save_wavenet_wav(wav, path, sr, inv_preemphasize, k):
    # wav = inv_preemphasis(wav, k, inv_preemphasize)
    wav *= 32767 / max(0.01, np.max(np.abs(wav)))
    wavfile.write(path, sr, wav.astype(np.int16))
Example #28
Source File: 4_test.py From Tacotron-2-keras with MIT License | 5 votes |
def save_wav(wav, path, sr):
    wav *= 32767 / max(0.01, np.max(np.abs(wav)))  # proposed by @dsmiller
    wavfile.write(path, sr, wav.astype(np.int16))
Example #29
Source File: utils.py From bird-species-classification with MIT License | 5 votes |
def write_wave_to_file(filename, rate, wave):
    wavfile.write(filename, rate, wave)
Example #30
Source File: generate_util.py From magenta with Apache License 2.0 | 5 votes |
def save_wav(audio, fname, sr=16000):
    wavfile.write(fname, sr, audio.astype('float32'))
    print('Saved to {}'.format(fname))