Python matplotlib.pyplot.specgram() Examples

The following are 16 code examples of matplotlib.pyplot.specgram(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module matplotlib.pyplot, or try the search function.
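
Before the project examples below, here is a minimal, self-contained sketch of plt.specgram() on a synthetic signal; the signal, sampling rate, and FFT settings are illustrative choices, not taken from any of the examples.

    import numpy as np
    import matplotlib.pyplot as plt

    # Synthetic test signal: a 440 Hz tone that jumps to 880 Hz halfway through.
    fs = 8000                                # sampling rate in Hz
    t = np.arange(0, 2.0, 1.0 / fs)          # two seconds of samples
    x = np.where(t < 1.0,
                 np.sin(2 * np.pi * 440 * t),
                 np.sin(2 * np.pi * 880 * t))

    # specgram returns the power spectrum, frequency bins, time bins, and image.
    Pxx, freqs, bins, im = plt.specgram(x, NFFT=256, Fs=fs, noverlap=128)
    plt.xlabel("Time [s]")
    plt.ylabel("Frequency [Hz]")
    plt.show()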

Example #1
Source File: audio_tools.py From tools with BSD 3-Clause "New" or "Revised" License | 7 votes

def run_phase_reconstruction_example():
    fs, d = fetch_sample_speech_tapestry()
    # actually gives however many components you say! So double what .m file
    # says
    fftsize = 512
    step = 64
    X_s = np.abs(stft(d, fftsize=fftsize, step=step, real=False,
                      compute_onesided=False))
    X_t = iterate_invert_spectrogram(X_s, fftsize, step, verbose=True)
    """
    import matplotlib.pyplot as plt
    plt.specgram(d, cmap="gray")
    plt.savefig("1.png")
    plt.close()
    plt.imshow(X_s, cmap="gray")
    plt.savefig("2.png")
    plt.close()
    """
    wavfile.write("phase_original.wav", fs, soundsc(d))
    wavfile.write("phase_reconstruction.wav", fs, soundsc(X_t))
Example #2
Source File: synth_demo.py From synthesizer with GNU Lesser General Public License v3.0 | 7 votes

def harmonics():
    synth = WaveSynth()
    freq = 1500
    num_harmonics = 6
    h_all = synth.harmonics(freq, 1, [(n, 1 / n) for n in range(1, num_harmonics + 1)])
    even_harmonics = [(1, 1)]  # always include fundamental tone harmonic
    even_harmonics.extend([(n, 1 / n) for n in range(2, num_harmonics * 2, 2)])
    h_even = synth.harmonics(freq, 1, even_harmonics)
    h_odd = synth.harmonics(freq, 1, [(n, 1 / n) for n in range(1, num_harmonics * 2, 2)])
    h_all.join(h_even).join(h_odd)
    import matplotlib.pyplot as plot
    plot.title("Spectrogram")
    plot.ylabel("Freq")
    plot.xlabel("Time")
    plot.specgram(h_all.get_frame_array(), Fs=synth.samplerate, noverlap=90,
                  cmap=plot.cm.gist_heat)
    plot.show()
Example #3
Source File: iqplot.py From iqtool with The Unlicense | 6 votes

def plotSpectrogram(data, fftWindow, fftSize, Fs):
    if fftSize == None:
        N = len(data)
    else:
        N = fftSize

    if Fs == None:
        Fs = 2

    if fftWindow == "rectangular":
        plt.specgram(data, NFFT=N, Fs=Fs,
                     window=lambda data: data * np.ones(len(data)),
                     noverlap=int(N / 10))
    elif fftWindow == "bartlett":
        plt.specgram(data, NFFT=N, Fs=Fs,
                     window=lambda data: data * np.bartlett(len(data)),
                     noverlap=int(N / 10))
    elif fftWindow == "blackman":  # was args.fftWindow, a NameError inside this function
        plt.specgram(data, NFFT=N, Fs=Fs,
                     window=lambda data: data * np.blackman(len(data)),
                     noverlap=int(N / 10))
    elif fftWindow == "hamming":
        plt.specgram(data, NFFT=N, Fs=Fs,
                     window=lambda data: data * np.hamming(len(data)),
                     noverlap=int(N / 10))
    elif fftWindow == "hanning":
        plt.specgram(data, NFFT=N, Fs=Fs,
                     window=lambda data: data * np.hanning(len(data)),
                     noverlap=int(N / 10))

    plt.show()
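
The elif chain above calls plt.specgram() five times with only the window function changing. As a side note, a table-driven sketch of the same idea (using the same numpy window functions; plot_spectrogram and WINDOWS are hypothetical names, not from the iqtool source):

    import numpy as np
    import matplotlib.pyplot as plt

    # Map window names to the numpy functions that generate them.
    WINDOWS = {
        "rectangular": np.ones,
        "bartlett": np.bartlett,
        "blackman": np.blackman,
        "hamming": np.hamming,
        "hanning": np.hanning,
    }

    def plot_spectrogram(data, fft_window, fft_size=None, fs=2):
        n = len(data) if fft_size is None else fft_size
        win = WINDOWS[fft_window]
        # specgram applies the window callable to each segment before the FFT.
        plt.specgram(data, NFFT=n, Fs=fs,
                     window=lambda seg: seg * win(len(seg)),
                     noverlap=n // 10)
        plt.show()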
Example #4
Source File: audio_tools.py From dagbldr with BSD 3-Clause "New" or "Revised" License | 6 votes

def run_phase_reconstruction_example():
    fs, d = fetch_sample_speech_tapestry()
    # actually gives however many components you say! So double what .m file
    # says
    fftsize = 512
    step = 64
    X_s = np.abs(stft(d, fftsize=fftsize, step=step, real=False,
                      compute_onesided=False))
    X_t = iterate_invert_spectrogram(X_s, fftsize, step, verbose=True)
    """
    import matplotlib.pyplot as plt
    plt.specgram(d, cmap="gray")
    plt.savefig("1.png")
    plt.close()
    plt.imshow(X_s, cmap="gray")
    plt.savefig("2.png")
    plt.close()
    """
    wavfile.write("phase_original.wav", fs, soundsc(d))
    wavfile.write("phase_reconstruction.wav", fs, soundsc(X_t))
Example #5
Source File: audio.py From representation_mixing with BSD 3-Clause "New" or "Revised" License | 6 votes

def run_phase_reconstruction_example():
    fs, d = fetch_sample_speech_tapestry()
    # actually gives however many components you say! So double what .m file
    # says
    fftsize = 512
    step = 64
    X_s = np.abs(stft(d, fftsize=fftsize, step=step, real=False,
                      compute_onesided=False))
    X_t = iterate_invert_spectrogram(X_s, fftsize, step, verbose=True)
    """
    import matplotlib.pyplot as plt
    plt.specgram(d, cmap="gray")
    plt.savefig("1.png")
    plt.close()
    plt.imshow(X_s, cmap="gray")
    plt.savefig("2.png")
    plt.close()
    """
    wavfile.write("phase_original.wav", fs, soundsc(d))
    wavfile.write("phase_reconstruction.wav", fs, soundsc(X_t))
Example #6
Source File: audio_tools.py From representation_mixing with BSD 3-Clause "New" or "Revised" License | 6 votes

def run_phase_reconstruction_example():
    fs, d = fetch_sample_speech_tapestry()
    # actually gives however many components you say! So double what .m file
    # says
    fftsize = 512
    step = 64
    X_s = np.abs(stft(d, fftsize=fftsize, step=step, real=False,
                      compute_onesided=False))
    X_t = iterate_invert_spectrogram(X_s, fftsize, step, verbose=True)
    """
    import matplotlib.pyplot as plt
    plt.specgram(d, cmap="gray")
    plt.savefig("1.png")
    plt.close()
    plt.imshow(X_s, cmap="gray")
    plt.savefig("2.png")
    plt.close()
    """
    wavfile.write("phase_original.wav", fs, soundsc(d))
    wavfile.write("phase_reconstruction.wav", fs, soundsc(X_t))
Example #7
Source File: audio_tools.py From tools with BSD 3-Clause "New" or "Revised" License | 5 votes

def implot(arr, scale=None, title="", cmap="gray"):
    import matplotlib.pyplot as plt
    if scale == "specgram":  # was `scale is "specgram"`: identity, not equality
        # plotting part
        mag = 20. * np.log10(np.abs(arr))
        # Transpose so time is X axis, and invert y axis so
        # frequency is low at bottom
        mag = mag.T[::-1, :]
    else:
        mag = arr
    f, ax = plt.subplots()
    ax.matshow(mag, cmap=cmap)
    plt.axis("off")
    x1 = mag.shape[0]
    y1 = mag.shape[1]

    def autoaspect(x_range, y_range):
        """
        The aspect to make a plot square with ax.set_aspect in Matplotlib
        """
        mx = max(x_range, y_range)
        mn = min(x_range, y_range)
        if x_range <= y_range:
            return mx / float(mn)
        else:
            return mn / float(mx)

    asp = autoaspect(x1, y1)
    ax.set_aspect(asp)
    plt.title(title)
Example #8
Source File: base.py From pyroomacoustics with MIT License | 5 votes

def plot(self, NFFT=512, noverlap=384, **kwargs):
    '''
    Plot the spectrogram of the audio sample.

    It takes the same keyword arguments as
    `matplotlib.pyplot.specgram <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.specgram.html>`_.
    '''
    import numpy as np
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        print('Warning: matplotlib is required for plotting')
        return

    # Handle single channel case
    if self.data.ndim == 1:
        data = self.data[:, None]
    else:
        data = self.data

    nchannels = data.shape[1]

    # Try to have a square looking plot
    pcols = int(np.ceil(np.sqrt(nchannels)))
    prows = int(np.ceil(nchannels / pcols))

    for c in range(nchannels):
        plt.subplot(prows, pcols, c + 1)  # one pane per channel in the grid
        plt.specgram(data[:, c], NFFT=NFFT, Fs=self.fs, noverlap=noverlap,
                     **kwargs)
        plt.xlabel('Time [s]')
        plt.ylabel('Frequency [Hz]')
        plt.title('Channel {}'.format(c + 1))
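
For context, a hedged usage sketch of this method: it assumes AudioSample(data, fs) is importable from pyroomacoustics.datasets and only relies on the .data and .fs attributes used above, which may differ across versions.

    import numpy as np
    from pyroomacoustics.datasets import AudioSample

    fs = 16000
    noise = np.random.randn(fs)          # one second of white noise as a stand-in
    sample = AudioSample(noise, fs)
    sample.plot(NFFT=512, noverlap=384)  # same defaults as the method signature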
Example #9
Source File: audio_tools.py From dagbldr with BSD 3-Clause "New" or "Revised" License | 5 votes

def implot(arr, scale=None, title="", cmap="gray"):
    import matplotlib.pyplot as plt
    if scale == "specgram":  # was `scale is "specgram"`: identity, not equality
        # plotting part
        mag = 20. * np.log10(np.abs(arr))
        # Transpose so time is X axis, and invert y axis so
        # frequency is low at bottom
        mag = mag.T[::-1, :]
    else:
        mag = arr
    f, ax = plt.subplots()
    ax.matshow(mag, cmap=cmap)
    plt.axis("off")
    x1 = mag.shape[0]
    y1 = mag.shape[1]

    def autoaspect(x_range, y_range):
        """
        The aspect to make a plot square with ax.set_aspect in Matplotlib
        """
        mx = max(x_range, y_range)
        mn = min(x_range, y_range)
        if x_range <= y_range:
            return mx / float(mn)
        else:
            return mn / float(mx)

    asp = autoaspect(x1, y1)
    ax.set_aspect(asp)
    plt.title(title)
Example #10
Source File: data_maker.py From AudioNet with MIT License | 5 votes

def graph_spectrogram(wav_file):
    rate, data = get_wav_info(wav_file)
    print(type(data), len(data))
    nfft = 256  # Length of the windowing segments
    fs = 256    # Sampling frequency
    pxx, freqs, bins, im = plt.specgram(data, NFFT=nfft, Fs=fs)
    print("pxx : ", len(pxx))
    print("freqs : ", len(freqs))
    print("bins : ", len(bins))
    # plt.axis('on')
    # plt.show()
    plt.axis('off')
    print(wav_file.split('.wav')[0])
    plt.savefig(wav_file.split('.wav')[0] + '.png',
                dpi=100,        # Dots per inch
                frameon=False,  # was the string 'false', which is truthy
                aspect='normal',
                bbox_inches='tight',
                pad_inches=0)   # Spectrogram saved as a .png
    try:
        im = Image.open(wav_file.split('.wav')[0] + '.png')
        rgb_im = im.convert('RGB')
        # split on '.wav' (not '.png') so the .jpg replaces the extension
        rgb_im.save(wav_file.split('.wav')[0] + '.jpg')
    except Exception as e:
        print(e)
        # Fall back to ImageMagick if PIL fails
        if os.path.exists(wav_file.split('.wav')[0] + '.png'):
            os.system('convert ' + (wav_file.split('.wav')[0] + '.png') +
                      ' ' + (wav_file.split('.wav')[0] + '.jpg'))
    os.remove(wav_file.split('.wav')[0] + '.png')
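
A side note on the PNG-to-JPEG round-trip above: with a recent matplotlib and Pillow installed, savefig() can write JPEG directly, so a sketch of the shorter path (with random data standing in for the wav samples) looks like this:

    import numpy as np
    import matplotlib.pyplot as plt

    data = np.random.randn(8192)  # stand-in for the samples read from the wav
    plt.specgram(data, NFFT=256, Fs=256)
    plt.axis('off')
    # Recent matplotlib builds (with Pillow available) write JPEG directly.
    plt.savefig('spectrogram.jpg', dpi=100, bbox_inches='tight', pad_inches=0)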
Example #11
Source File: audio.py From representation_mixing with BSD 3-Clause "New" or "Revised" License | 5 votes

def implot(arr, scale=None, title="", cmap="gray"):
    import matplotlib.pyplot as plt
    if scale == "specgram":  # was `scale is "specgram"`: identity, not equality
        # plotting part
        mag = 20. * np.log10(np.abs(arr))
        # Transpose so time is X axis, and invert y axis so
        # frequency is low at bottom
        mag = mag.T[::-1, :]
    else:
        mag = arr
    f, ax = plt.subplots()
    ax.matshow(mag, cmap=cmap)
    plt.axis("off")
    x1 = mag.shape[0]
    y1 = mag.shape[1]

    def autoaspect(x_range, y_range):
        """
        The aspect to make a plot square with ax.set_aspect in Matplotlib
        """
        mx = max(x_range, y_range)
        mn = min(x_range, y_range)
        if x_range <= y_range:
            return mx / float(mn)
        else:
            return mn / float(mx)

    asp = autoaspect(x1, y1)
    ax.set_aspect(asp)
    plt.title(title)
Example #12
Source File: audio_tools.py From representation_mixing with BSD 3-Clause "New" or "Revised" License | 5 votes

def implot(arr, scale=None, title="", cmap="gray"):
    import matplotlib.pyplot as plt
    if scale == "specgram":  # was `scale is "specgram"`: identity, not equality
        # plotting part
        mag = 20. * np.log10(np.abs(arr))
        # Transpose so time is X axis, and invert y axis so
        # frequency is low at bottom
        mag = mag.T[::-1, :]
    else:
        mag = arr
    f, ax = plt.subplots()
    ax.matshow(mag, cmap=cmap)
    plt.axis("off")
    x1 = mag.shape[0]
    y1 = mag.shape[1]

    def autoaspect(x_range, y_range):
        """
        The aspect to make a plot square with ax.set_aspect in Matplotlib
        """
        mx = max(x_range, y_range)
        mn = min(x_range, y_range)
        if x_range <= y_range:
            return mx / float(mn)
        else:
            return mn / float(mx)

    asp = autoaspect(x1, y1)
    ax.set_aspect(asp)
    plt.title(title)
Example #13
Source File: audio_tools.py From tools with BSD 3-Clause "New" or "Revised" License | 4 votes

def run_phase_vq_example():
    def _pre(list_of_data):
        # Temporal window setting is crucial! - 512 seems OK for music, 256
        # fruit perhaps due to samplerates
        n_fft = 256
        step = 32
        f_r = np.vstack([np.abs(stft(dd, n_fft, step=step, real=False,
                                     compute_onesided=False))
                         for dd in list_of_data])
        return f_r, n_fft, step

    def preprocess_train(list_of_data, random_state):
        f_r, n_fft, step = _pre(list_of_data)
        clusters = copy.deepcopy(f_r)
        return clusters

    def apply_preprocess(list_of_data, clusters):
        f_r, n_fft, step = _pre(list_of_data)
        f_clust = f_r
        # Nondeterministic ?
        memberships, distances = vq(f_clust, clusters)
        vq_r = clusters[memberships]
        d_k = iterate_invert_spectrogram(vq_r, n_fft, step, verbose=True)
        return d_k

    random_state = np.random.RandomState(1999)

    fs, d = fetch_sample_speech_fruit()
    d1 = d[::9]
    d2 = d[7::8][:5]
    # make sure d1 and d2 aren't the same!
    assert [len(di) for di in d1] != [len(di) for di in d2]

    clusters = preprocess_train(d1, random_state)
    fix_d1 = np.concatenate(d1)
    fix_d2 = np.concatenate(d2)
    vq_d2 = apply_preprocess(d2, clusters)

    wavfile.write("phase_train_no_agc.wav", fs, soundsc(fix_d1))
    wavfile.write("phase_vq_test_no_agc.wav", fs, soundsc(vq_d2))

    agc_d1, freq_d1, energy_d1 = time_attack_agc(fix_d1, fs, .5, 5)
    agc_d2, freq_d2, energy_d2 = time_attack_agc(fix_d2, fs, .5, 5)
    agc_vq_d2, freq_vq_d2, energy_vq_d2 = time_attack_agc(vq_d2, fs, .5, 5)

    """
    import matplotlib.pyplot as plt
    plt.specgram(agc_vq_d2, cmap="gray")
    #plt.title("Fake")
    plt.figure()
    plt.specgram(agc_d2, cmap="gray")
    #plt.title("Real")
    plt.show()
    """

    wavfile.write("phase_train_agc.wav", fs, soundsc(agc_d1))
    wavfile.write("phase_test_agc.wav", fs, soundsc(agc_d2))
    wavfile.write("phase_vq_test_agc.wav", fs, soundsc(agc_vq_d2))
Example #14
Source File: synth_demo.py From synthesizer with GNU Lesser General Public License v3.0 | 4 votes

def fm():
    synth = WaveSynth(samplerate=8000)
    from matplotlib import pyplot as plot
    freq = 2000
    lfo1 = Sine(1, amplitude=0.4, samplerate=synth.samplerate)
    s1 = synth.sine(freq, duration=3, fm_lfo=lfo1)
    plot.title("Spectrogram")
    plot.ylabel("Freq")
    plot.xlabel("Time")
    plot.specgram(s1.get_frame_array(), Fs=synth.samplerate, noverlap=90,
                  cmap=plot.cm.gist_heat)
    plot.show()
    with Output(nchannels=1, mixing="sequential") as out:
        synth = WaveSynth()
        freq = 440
        lfo1 = Linear(5, samplerate=synth.samplerate)
        lfo1 = EnvelopeFilter(lfo1, 1, 0.5, 0.5, 0.5, 1)
        s1 = synth.sine(freq, duration=3, fm_lfo=lfo1)
        s_all = s1.copy()
        out.play_sample(s1)
        lfo1 = Sine(1, amplitude=0.2, samplerate=synth.samplerate)
        s1 = synth.sine(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        lfo1 = Sine(freq / 17, amplitude=0.5, samplerate=synth.samplerate)
        s1 = synth.sine(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        lfo1 = Sine(freq / 6, amplitude=0.5, samplerate=synth.samplerate)
        s1 = synth.sine(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        lfo1 = Sine(1, amplitude=0.4, samplerate=synth.samplerate)
        s1 = synth.triangle(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        freq = 440 * 2
        lfo1 = Sine(freq / 80, amplitude=0.4, samplerate=synth.samplerate)
        s1 = synth.triangle(freq, duration=2, fm_lfo=lfo1)
        s_all.join(s1)
        out.play_sample(s1)
        # s_all.write_wav("fmtestall.wav")
        out.wait_all_played()
Example #15
Source File: audio_tools.py From dagbldr with BSD 3-Clause "New" or "Revised" License | 4 votes

def run_phase_vq_example():
    def _pre(list_of_data):
        # Temporal window setting is crucial! - 512 seems OK for music, 256
        # fruit perhaps due to samplerates
        n_fft = 256
        step = 32
        f_r = np.vstack([np.abs(stft(dd, n_fft, step=step, real=False,
                                     compute_onesided=False))
                         for dd in list_of_data])
        return f_r, n_fft, step

    def preprocess_train(list_of_data, random_state):
        f_r, n_fft, step = _pre(list_of_data)
        clusters = copy.deepcopy(f_r)
        return clusters

    def apply_preprocess(list_of_data, clusters):
        f_r, n_fft, step = _pre(list_of_data)
        f_clust = f_r
        # Nondeterministic ?
        memberships, distances = vq(f_clust, clusters)
        vq_r = clusters[memberships]
        d_k = iterate_invert_spectrogram(vq_r, n_fft, step, verbose=True)
        return d_k

    random_state = np.random.RandomState(1999)

    fs, d = fetch_sample_speech_fruit()
    d1 = d[::9]
    d2 = d[7::8][:5]
    # make sure d1 and d2 aren't the same!
    assert [len(di) for di in d1] != [len(di) for di in d2]

    clusters = preprocess_train(d1, random_state)
    fix_d1 = np.concatenate(d1)
    fix_d2 = np.concatenate(d2)
    vq_d2 = apply_preprocess(d2, clusters)

    wavfile.write("phase_train_no_agc.wav", fs, soundsc(fix_d1))
    wavfile.write("phase_vq_test_no_agc.wav", fs, soundsc(vq_d2))

    agc_d1, freq_d1, energy_d1 = time_attack_agc(fix_d1, fs, .5, 5)
    agc_d2, freq_d2, energy_d2 = time_attack_agc(fix_d2, fs, .5, 5)
    agc_vq_d2, freq_vq_d2, energy_vq_d2 = time_attack_agc(vq_d2, fs, .5, 5)

    """
    import matplotlib.pyplot as plt
    plt.specgram(agc_vq_d2, cmap="gray")
    #plt.title("Fake")
    plt.figure()
    plt.specgram(agc_d2, cmap="gray")
    #plt.title("Real")
    plt.show()
    """

    wavfile.write("phase_train_agc.wav", fs, soundsc(agc_d1))
    wavfile.write("phase_test_agc.wav", fs, soundsc(agc_d2))
    wavfile.write("phase_vq_test_agc.wav", fs, soundsc(agc_vq_d2))
Example #16
Source File: audio.py From representation_mixing with BSD 3-Clause "New" or "Revised" License | 4 votes

def run_phase_vq_example():
    def _pre(list_of_data):
        # Temporal window setting is crucial! - 512 seems OK for music, 256
        # fruit perhaps due to samplerates
        n_fft = 256
        step = 32
        f_r = np.vstack([np.abs(stft(dd, fftsize=n_fft, step=step, real=False,
                                     compute_onesided=False))
                         for dd in list_of_data])
        return f_r, n_fft, step

    def preprocess_train(list_of_data, random_state):
        f_r, n_fft, step = _pre(list_of_data)
        clusters = copy.deepcopy(f_r)
        return clusters

    def apply_preprocess(list_of_data, clusters):
        f_r, n_fft, step = _pre(list_of_data)
        f_clust = f_r
        # Nondeterministic ?
        memberships, distances = vq(f_clust, clusters)
        vq_r = clusters[memberships]
        d_k = iterate_invert_spectrogram(vq_r, n_fft, step, verbose=True)
        return d_k

    random_state = np.random.RandomState(1999)

    fs, d = fetch_sample_speech_fruit()
    d1 = d[::9]
    d2 = d[7::8][:5]
    # make sure d1 and d2 aren't the same!
    assert [len(di) for di in d1] != [len(di) for di in d2]

    clusters = preprocess_train(d1, random_state)
    fix_d1 = np.concatenate(d1)
    fix_d2 = np.concatenate(d2)
    vq_d2 = apply_preprocess(d2, clusters)

    wavfile.write("phase_train_no_agc.wav", fs, soundsc(fix_d1))
    wavfile.write("phase_vq_test_no_agc.wav", fs, soundsc(vq_d2))

    agc_d1, freq_d1, energy_d1 = time_attack_agc(fix_d1, fs, .5, 5)
    agc_d2, freq_d2, energy_d2 = time_attack_agc(fix_d2, fs, .5, 5)
    agc_vq_d2, freq_vq_d2, energy_vq_d2 = time_attack_agc(vq_d2, fs, .5, 5)

    """
    import matplotlib.pyplot as plt
    plt.specgram(agc_vq_d2, cmap="gray")
    #plt.title("Fake")
    plt.figure()
    plt.specgram(agc_d2, cmap="gray")
    #plt.title("Real")
    plt.show()
    """

    wavfile.write("phase_train_agc.wav", fs, soundsc(agc_d1))
    wavfile.write("phase_test_agc.wav", fs, soundsc(agc_d2))
    wavfile.write("phase_vq_test_agc.wav", fs, soundsc(agc_vq_d2))