Java Code Examples for be.tarsos.dsp.AudioEvent#getTimeStamp()
The following examples show how to use be.tarsos.dsp.AudioEvent#getTimeStamp().
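For context, getTimeStamp() reports the position, in seconds, of the buffer currently being processed within the audio stream. Below is a minimal sketch (not taken from the examples that follow) of how such a processor is typically attached to a TarsosDSP AudioDispatcher; the microphone source, sample rate, buffer size, overlap and class name are illustrative assumptions only.

import javax.sound.sampled.LineUnavailableException;

import be.tarsos.dsp.AudioDispatcher;
import be.tarsos.dsp.AudioEvent;
import be.tarsos.dsp.AudioProcessor;
import be.tarsos.dsp.io.jvm.AudioDispatcherFactory;

public class TimeStampPrinter {

    public static void main(String[] args) throws LineUnavailableException {
        // 16 kHz input from the default microphone, 1024-sample buffers, no overlap (arbitrary values).
        AudioDispatcher dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(16000, 1024, 0);

        dispatcher.addAudioProcessor(new AudioProcessor() {
            @Override
            public boolean process(AudioEvent audioEvent) {
                // getTimeStamp() gives the buffer's position in the stream, in seconds.
                System.out.printf("buffer at %.3f s%n", audioEvent.getTimeStamp());
                return true; // keep the processing chain going
            }

            @Override
            public void processingFinished() {
                // nothing to clean up
            }
        });

        dispatcher.run(); // blocks; use new Thread(dispatcher).start() to run in the background
    }
}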
Example 1
Source File: CrossCorrelation.java From Panako with GNU Affero General Public License v3.0
@Override
public boolean process(AudioEvent audioEvent) {
    float[] fftData = audioEvent.getFloatBuffer().clone();
    Arrays.fill(zeroPaddedData, 0);
    System.arraycopy(fftData, 0, zeroPaddedData, fftData.length/2, fftData.length);

    fft.forwardTransform(zeroPaddedData);
    fft.multiply(zeroPaddedData, zeroPaddedInvesedQuery);
    fft.backwardsTransform(zeroPaddedData);

    float maxVal = -100000;
    int maxIndex = 0;
    for(int i = 0 ; i < zeroPaddedData.length ; i++){
        if(zeroPaddedData[i] > maxVal){
            maxVal = zeroPaddedData[i];
            maxIndex = i;
        }
    }

    float time = (float) (audioEvent.getTimeStamp() - audioEvent.getBufferSize()/audioEvent.getSampleRate() + maxIndex/2/audioEvent.getSampleRate() + 0.005);
    handler.handleCrossCorrelation((float) audioEvent.getTimeStamp(), time, maxVal);
    return true;
}
Example 2
Source File: FadeOut.java From cythara with GNU General Public License v3.0
@Override
public boolean process(AudioEvent audioEvent) {
    // Don't do anything before the beginning of Fade Out
    if(isFadeOut==true) {
        if(firstTime==-1)
            firstTime=audioEvent.getTimeStamp();

        // Decrease the gain according to time since the beginning of the Fade Out
        time=audioEvent.getTimeStamp()-firstTime;
        gp.setGain(1-time/duration);
        gp.process(audioEvent);
    }
    return true;
}
Example 3
Source File: FadeIn.java From cythara with GNU General Public License v3.0
@Override
public boolean process(AudioEvent audioEvent) {
    // Don't do anything after the end of the Fade In
    if(fadingIn) {
        if(firstTime==-1)
            firstTime=audioEvent.getTimeStamp();

        // Increase the gain according to time since the beginning of the Fade In
        time=audioEvent.getTimeStamp()-firstTime;
        gp.setGain(time/duration);
        gp.process(audioEvent);

        if(time > duration){
            fadingIn = false;
        }
    }
    return true;
}
Example 4
Source File: ChromaPrintExtractor.java From Panako with GNU Affero General Public License v3.0
@Override
public boolean process(AudioEvent audioEvent) {
    float[] buffer = audioEvent.getFloatBuffer().clone();
    float[] magnitudes = new float[buffer.length/2];
    float[] chroma = new float[12];

    fft.forwardTransform(buffer);
    fft.modulus(buffer, magnitudes);

    //make chroma with C as starting point (MIDI key 0)
    for(int i = 0 ; i < magnitudes.length ; i++){
        //only process from MIDI key 29 (43Hz) to 107 (3951Hz)
        if(binStartingPointsInCents[i] > 2900 && binStartingPointsInCents[i] < 10700){
            magnitudes[i] = (float) Math.log1p(magnitudes[i]);
            int chromaIndex = Math.round(binStartingPointsInCents[i]/100) % 12;
            chroma[chromaIndex] += magnitudes[i];
        }
    }

    //normalize on the euclidean norm
    float squares = 0;
    for(int i = 0 ; i < chroma.length ; i++){
        squares += chroma[i] * chroma[i];
    }
    squares = (float) Math.sqrt(squares);
    for(int i = 0 ; i < chroma.length ; i++){
        chroma[i] = chroma[i]/squares;
    }

    //keep a running median
    for(int i = 0 ; i < chroma.length ; i++){
        orderedMagnitudes.add(chroma[i]);
        if(orderedMagnitudes.size()==1){
            currentMedian = chroma[i];
        }else{
            SortedSet<Float> h = orderedMagnitudes.headSet(currentMedian,true);
            SortedSet<Float> t = orderedMagnitudes.tailSet(currentMedian,false);
            int x = 1 - orderedMagnitudes.size() % 2;
            if (h.size() < t.size() + x)
                currentMedian = t.first();
            else if (h.size() > t.size() + x)
                currentMedian = h.last();
        }
    }

    //use the running median to binarize chroma
    for(int i = 0 ; i < chroma.length ; i++){
        if(chroma[i] > currentMedian)
            chroma[i] = 1;
        else
            chroma[i] = 0;
    }

    float timeStamp = (float) audioEvent.getTimeStamp();
    this.magnitudes.put(timeStamp, magnitudes);
    this.chromaMagnitudes.put(timeStamp, chroma);
    return true;
}
Example 5
Source File: RafsExtractor.java From Panako with GNU Affero General Public License v3.0
@Override
public boolean process(AudioEvent audioEvent) {
    audioBuffer = audioEvent.getFloatBuffer().clone();
    fft.forwardTransform(audioBuffer);
    fft.modulus(audioBuffer, currentFFTMagnitudes);

    //clear
    Arrays.fill(currentMagnitudes, 0);

    //make chroma with C as starting point (MIDI key 0)
    for(int i = 0 ; i < currentFFTMagnitudes.length ; i++){
        //only process from about 300Hz to 2000Hz
        if(binStartingPointsInCents[i] > centsStart && binStartingPointsInCents[i] < centStop){
            //currentFFTMagnitudes[i] = (float) Math.log1p(currentFFTMagnitudes[i]);
            int bandIndex = Math.round((binStartingPointsInCents[i]-centsStart)/(float) (centStop - centsStart)*32);
            currentMagnitudes[bandIndex] += currentFFTMagnitudes[i];
        }
    }

    float timeStamp = (float) audioEvent.getTimeStamp();
    //this.magnitudes.put(timeStamp , magnitudes);

    if(previousMagnitudes != null){
        //this makes sure that length is 32
        currentFingerprint.set(31,true);
        for(int i = 0 ; i < currentFingerprint.length(); i++){
            float difference = currentMagnitudes[i] - currentMagnitudes[i+1] - (previousMagnitudes[i] - previousMagnitudes[i+1]);
            boolean binaryValue = difference > 0;
            differences[i] = Math.abs(difference);
            currentFingerprint.set(i, binaryValue);
        }
        if(trackProbabilities){
            //use differences to sort the indexes from low to high probability that the bit is correct.
            int[] sortedIndices = IntStream.range(0, differences.length)
                    .boxed().sorted((i, j) -> differences[i].compareTo(differences[j]))
                    .mapToInt(ele -> ele).toArray();
            fingerprintProbabilities.put(timeStamp, sortedIndices);
        }
        fingerprints.put(timeStamp, (BitSet) currentFingerprint.clone());
    }

    //switch pointers
    tempMagnitudes = previousMagnitudes;
    previousMagnitudes = currentMagnitudes;
    currentMagnitudes = tempMagnitudes;

    return true;
}
Example 6
Source File: RafsPacker.java From Panako with GNU Affero General Public License v3.0
@Override
public boolean process(AudioEvent audioEvent) {
    BitSet set = extractor.fingerprints.get((float) audioEvent.getTimeStamp());
    int[] probabilities = null;
    if(trackProbabilities)
        probabilities = extractor.fingerprintProbabilities.get((float) audioEvent.getTimeStamp());

    if(set != null){
        if(bitIndex == 0){
            currentTimeStamp = audioEvent.getTimeStamp();
        }
        for(int i = 0 ; i < 32 ; i++){
            currentFingerprint.set(bitIndex, set.get(i));
            if(trackProbabilities)
                currentProbabilities[bitIndex] = probabilities[i];
            bitIndex++;
        }

        //completed print
        if(bitIndex == 128){
            //store the print
            packedFingerprints.put((float) currentTimeStamp, (BitSet) currentFingerprint.clone());
            if(trackProbabilities)
                packedProbabilities.put((float) currentTimeStamp, currentProbabilities.clone());

            //overlap of one print of 32 bits!
            bitIndex = 0;
            for(int i = 0 ; i < 32 ; i++){
                currentFingerprint.set(bitIndex, set.get(i));
                if(trackProbabilities)
                    currentProbabilities[bitIndex] = probabilities[i];
                bitIndex++;
            }
            currentTimeStamp = audioEvent.getTimeStamp();
        }
    }
    return true;
}
Example 7
Source File: ComplexOnsetDetector.java From cythara with GNU General Public License v3.0
private void onsetDetection(AudioEvent audioEvent){
    //calculate the complex fft (the magnitude and phase)
    float[] data = audioEvent.getFloatBuffer().clone();
    float[] power = new float[data.length/2];
    float[] phase = new float[data.length/2];
    fft.powerPhaseFFT(data, power, phase);

    float onsetValue = 0;
    for(int j = 0 ; j < power.length ; j++){
        //int imgIndex = (power.length - 1) * 2 - j;

        // compute the predicted phase
        dev1[j] = 2.f * theta1[j] - theta2[j];

        // compute the euclidean distance in the complex domain
        // sqrt ( r_1^2 + r_2^2 - 2 * r_1 * r_2 * \cos ( \phi_1 - \phi_2 ) )
        onsetValue += Math.sqrt(Math.abs(Math.pow(oldmag[j],2) + Math.pow(power[j],2) - 2. * oldmag[j] * power[j] * Math.cos(dev1[j] - phase[j])));

        /* swap old phase data (need to remember 2 frames behind) */
        theta2[j] = theta1[j];
        theta1[j] = phase[j];

        /* swap old magnitude data (1 frame is enough) */
        oldmag[j] = power[j];
    }

    lastOnsetValue = onsetValue;

    boolean isOnset = peakPicker.pickPeak(onsetValue);
    if(isOnset){
        if(audioEvent.isSilence(silenceThreshold)){
            isOnset = false;
        } else {
            double delay = (audioEvent.getOverlap() * 4.3) / audioEvent.getSampleRate();
            double onsetTime = audioEvent.getTimeStamp() - delay;
            if(onsetTime - lastOnset > minimumInterOnsetInterval){
                handler.handleOnset(onsetTime, peakPicker.getLastPeekValue());
                lastOnset = onsetTime;
            }
        }
    }
}
Example 8
Source File: FlangerEffect.java From cythara with GNU General Public License v3.0
@Override
public boolean process(AudioEvent audioEvent) {
    float[] audioFloatBuffer = audioEvent.getFloatBuffer();
    int overlap = audioEvent.getOverlap();

    // Divide f by two, to counter rectifier below, which effectively
    // doubles the frequency
    double twoPIf = 2 * Math.PI * lfoFrequency / 2.0;
    double time = audioEvent.getTimeStamp();
    double timeStep = 1.0 / sampleRate;

    for (int i = overlap; i < audioFloatBuffer.length; i++) {

        // Calculate the LFO delay value with a sine wave:
        //fix by hans bickel
        double lfoValue = (flangerBuffer.length - 1) * Math.sin(twoPIf * time);
        // add a time step, each iteration
        time += timeStep;

        // Make the delay a positive integer
        int delay = (int) (Math.round(Math.abs(lfoValue)));

        // store the current sample in the delay buffer;
        if (writePosition >= flangerBuffer.length) {
            writePosition = 0;
        }
        flangerBuffer[writePosition] = audioFloatBuffer[i];

        // find out the position to read the delayed sample:
        int readPosition = writePosition - delay;
        if (readPosition < 0) {
            readPosition += flangerBuffer.length;
        }

        //increment the write position
        writePosition++;

        // Output is the input summed with the value at the delayed flanger
        // buffer
        audioFloatBuffer[i] = dry * audioFloatBuffer[i] + wet * flangerBuffer[readPosition];
    }
    return true;
}