Python librosa.frames_to_time() Examples
The following are 9 code examples of librosa.frames_to_time(), drawn from open-source projects; the source file and license are noted above each example.
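Before the examples, a minimal sketch of the call itself: frames_to_time() converts frame indices to timestamps in seconds as time = frames * hop_length / sr, using librosa's defaults of sr=22050 and hop_length=512 when they are not given.

import numpy as np
import librosa

frames = np.arange(5)
times = librosa.frames_to_time(frames, sr=22050, hop_length=512)
# [0.0, 0.0232, 0.0464, 0.0697, 0.0929] seconds, i.e. multiples of 512/22050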
Example #1
Source File: audio.py From amen with BSD 2-Clause "Simplified" License
def _get_beats(self):
    """
    Gets beats using librosa's beat tracker.
    """
    _, beat_frames = librosa.beat.beat_track(
        y=self.analysis_samples, sr=self.analysis_sample_rate, trim=False
    )

    # pad beat times to full duration
    f_max = librosa.time_to_frames(self.duration, sr=self.analysis_sample_rate)
    beat_frames = librosa.util.fix_frames(beat_frames, x_min=0, x_max=f_max)

    # convert frames to times
    beat_times = librosa.frames_to_time(beat_frames, sr=self.analysis_sample_rate)

    # make the list of (start, duration) tuples that TimingList expects
    starts_durs = [(s, t - s) for (s, t) in zip(beat_times, beat_times[1:])]

    return starts_durs
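A note on the list comprehension above: zipping beat_times with beat_times[1:] pairs each beat with its successor, so every beat except the last yields a (start, duration) tuple; padding the frames out to the final frame with fix_frames is what gives the last real beat a duration. A tiny standalone sketch with made-up times:

times = [0.0, 0.5, 1.0, 1.5]   # hypothetical beat times
starts_durs = [(s, t - s) for (s, t) in zip(times, times[1:])]
# [(0.0, 0.5), (0.5, 0.5), (1.0, 0.5)]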
Example #2
Source File: audio.py From amen with BSD 2-Clause "Simplified" License
def _get_segments(self):
    """
    Gets Echo Nest style segments using librosa's onset detection and backtracking.
    """
    # backtrack=True rolls each detection back to the preceding local
    # energy minimum, so segment boundaries align with the note onsets
    onset_frames = librosa.onset.onset_detect(
        y=self.analysis_samples, sr=self.analysis_sample_rate, backtrack=True
    )
    segment_times = librosa.frames_to_time(
        onset_frames, sr=self.analysis_sample_rate
    )

    # make the list of (start, duration) tuples that TimingList expects
    starts_durs = [(s, t - s) for (s, t) in zip(segment_times, segment_times[1:])]

    return starts_durs
Example #3
Source File: audio.py From amen with BSD 2-Clause "Simplified" License
def _convert_to_dataframe(cls, feature_data, columns):
    """
    Take raw librosa feature data and convert it to a pandas DataFrame.

    Parameters
    ----------
    feature_data: numpy array
        an N by T array, where N is the number of features
        and T is the number of time steps
    columns: list of strings
        a list of column names of length N, matching the N dimension
        of feature_data

    Returns
    -------
    pandas.DataFrame
    """
    feature_data = feature_data.transpose()
    frame_numbers = np.arange(len(feature_data))
    # frames_to_time is called with librosa's defaults (sr=22050, hop_length=512)
    indexes = librosa.frames_to_time(frame_numbers)
    indexes = pd.to_timedelta(indexes, unit='s')
    data = pd.DataFrame(data=feature_data, index=indexes, columns=columns)
    return data
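A hypothetical usage sketch (the feature values are made up, and this assumes the method is exposed as a classmethod on amen's Audio class). Since frames_to_time() is called with its defaults, the resulting index is only correct for features computed at sr=22050 with hop_length=512:

import numpy as np

feature_data = np.random.randn(4, 100)                  # 4 features x 100 frames (hypothetical)
columns = ['f0', 'f1', 'f2', 'f3']
df = Audio._convert_to_dataframe(feature_data, columns)
df.index                                                # TimedeltaIndex stepping by ~23.2 ms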
Example #4
Source File: beat_analyzer.py From rcaudio with MIT License
def run(self):
    while self.recorder.start_time is None:
        time.sleep(1)
    self.current_b = time.time()
    self.start_time = self.recorder.start_time
    while self.running.isSet():
        if len(self.audio_data) < 4 * self.sr:
            time.sleep(.5)
            self.logger.debug("The data is not enough...")
            continue
        # analyze only the most recent rec_size samples
        start_samples = len(self.audio_data) - self.rec_size if len(self.audio_data) > self.rec_size else 0
        data = np.array(self.audio_data[start_samples:]).astype(np.float32)
        start_time = start_samples / self.sr
        tempo, beat_frames = librosa.beat.beat_track(y=data, sr=self.sr)
        # convert frame indices to absolute times; pass sr so the
        # conversion matches the recording's sample rate
        beat_times = librosa.frames_to_time(beat_frames, sr=self.sr) + start_time + self.start_time
        if len(beat_times) < 5:
            self.logger.debug("The beats count <%d> is not enough..." % len(beat_times))
            continue
        # fit beat time as a linear function of beat index:
        # slope = beat period in seconds, intercept = phase offset
        self.expected_k, self.expected_b = np.polyfit(range(len(beat_times)), beat_times, 1)
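The np.polyfit call at the end fits beat time as a linear function of beat index, so the slope k is the beat period in seconds (60/k is the tempo in BPM) and the intercept b is the phase offset; extrapolating the line predicts future beats. A small sketch with made-up beat times:

import numpy as np

beat_times = np.array([0.0, 0.51, 1.0, 1.49, 2.01])   # hypothetical, roughly 120 BPM
k, b = np.polyfit(range(len(beat_times)), beat_times, 1)
tempo_bpm = 60.0 / k                                  # slope k is the beat period
next_beat = k * len(beat_times) + b                   # predicted time of the next beat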
Example #5
Source File: 02_wav_features_and_spectrogram.py From Convolutional-Autoencoder-Music-Similarity with MIT License
def __init__(self, loadedAudio):
    self.wav = loadedAudio[0]
    self.samplefreq = loadedAudio[1]
    # If imported as 16-bit, convert to floating 32-bit ranging from -1 to 1
    if self.wav.dtype == 'int16':
        self.wav = self.wav / (2.0**15)
    self.channels = 1  # Assumes mono; if stereo then 2 (found by self.wav.shape[1])
    self.sample_points = self.wav.shape[0]
    self.audio_length_seconds = self.sample_points / self.samplefreq
    self.time_array_seconds = np.arange(0, self.sample_points, 1) / self.samplefreq
    # Run the beat tracker once and keep both the tempo and the beat frames
    self.tempo_bpm, self.beat_frames = librosa.beat.beat_track(y=self.wav, sr=self.samplefreq)
    # Transform beat array into seconds (these are the times when the beat hits)
    self.beat_times = librosa.frames_to_time(self.beat_frames, sr=self.samplefreq)
    # Get the rolloff frequency - the frequency below which 90% of the
    # spectral energy lies, like a low pass filter cutoff
    self.rolloff_freq = np.mean(librosa.feature.spectral_rolloff(y=self.wav, sr=self.samplefreq, hop_length=512, roll_percent=0.9))
Example #6
Source File: base.py From msaf with MIT License
def estimate_beats(self):
    """Estimates the beats using librosa.

    Returns
    -------
    times: np.array
        Times of estimated beats in seconds.
    frames: np.array
        Frame indices of estimated beats.
    """
    # Compute harmonic-percussive source separation if needed
    if self._audio_percussive is None:
        self._audio_harmonic, self._audio_percussive = self.compute_HPSS()

    # Compute beats
    tempo, frames = librosa.beat.beat_track(
        y=self._audio_percussive, sr=self.sr, hop_length=self.hop_length)

    # To times
    times = librosa.frames_to_time(frames, sr=self.sr, hop_length=self.hop_length)

    # TODO: Is this really necessary?
    if len(times) > 0 and times[0] == 0:
        times = times[1:]
        frames = frames[1:]

    return times, frames
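The HPSS step matters here because beat tracking keys on transients: separating out the harmonic content and tracking beats on the percussive residual alone usually gives cleaner onsets. A minimal standalone sketch of the same pattern (the input filename is hypothetical):

import librosa

y, sr = librosa.load('song.wav')                      # hypothetical input file
y_harmonic, y_percussive = librosa.effects.hpss(y)
tempo, frames = librosa.beat.beat_track(y=y_percussive, sr=sr, hop_length=512)
times = librosa.frames_to_time(frames, sr=sr, hop_length=512)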
Example #7
Source File: base.py From msaf with MIT License
def _compute_framesync_times(self):
    """Computes the framesync times based on the framesync features."""
    self._framesync_times = librosa.core.frames_to_time(
        np.arange(self._framesync_features.shape[0]),
        sr=self.sr, hop_length=self.hop_length)
Example #8
Source File: speech_cls_task.py From delta with Apache License 2.0
def get_duration(self, filename, sr):  #pylint: disable=invalid-name
    ''' time in seconds '''
    if filename.endswith('.npy'):
        # stored features: recover the duration from the frame count,
        # with the hop length derived from the window step in seconds
        nframe = np.load(filename).shape[0]
        return librosa.frames_to_time(
            nframe, hop_length=self._winstep * sr, sr=sr)
    if filename.endswith('.wav'):
        return librosa.get_duration(filename=filename)
    raise ValueError("filename suffix not .npy or .wav: {}".format(
        os.path.splitext(filename)[-1]))
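A quick sanity check of the .npy branch with made-up numbers: a 10 ms window step at 16 kHz gives a hop of 160 samples, so 500 stored feature frames map back to 500 * 160 / 16000 = 5.0 seconds.

import librosa

nframe, sr, winstep = 500, 16000, 0.01                # hypothetical values
librosa.frames_to_time(nframe, sr=sr, hop_length=int(winstep * sr))  # 5.0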
Example #9
Source File: example_beat.py From jams with ISC License
def beat_track(infile, outfile):
    # Load the audio file
    y, sr = librosa.load(infile)

    # Compute the track duration
    track_duration = librosa.get_duration(y=y, sr=sr)

    # Extract tempo and beat estimates
    tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)

    # Convert beat frames to time
    beat_times = librosa.frames_to_time(beat_frames, sr=sr)

    # Construct a new JAMS object and annotation records
    jam = jams.JAMS()

    # Store the track duration
    jam.file_metadata.duration = track_duration

    beat_a = jams.Annotation(namespace='beat')
    beat_a.annotation_metadata = jams.AnnotationMetadata(data_source='librosa beat tracker')

    # Add beat timings to the annotation record.
    # The beat namespace does not require value or confidence fields,
    # so we can leave those blank.
    for t in beat_times:
        beat_a.append(time=t, duration=0.0)

    # Store the new annotation in the jam
    jam.annotations.append(beat_a)

    # Add tempo estimation to the annotation.
    # The tempo estimate is global, so it should start at time=0 and cover
    # the full track duration.
    tempo_a = jams.Annotation(namespace='tempo', time=0, duration=track_duration)
    tempo_a.annotation_metadata = jams.AnnotationMetadata(data_source='librosa tempo estimator')

    # If we had a likelihood score on the estimation, it could be stored in
    # `confidence`. Since we have no competing estimates, we'll set it to 1.0.
    tempo_a.append(time=0.0, duration=track_duration, value=tempo, confidence=1.0)

    # Store the new annotation in the jam
    jam.annotations.append(tempo_a)

    # Save to disk
    jam.save(outfile)
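Hypothetical usage of the function above (the filenames are made up):

beat_track('track.wav', 'track.jams')   # writes one beat and one tempo annotation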