android.media.MediaRecorder.AudioSource Java Examples
The following examples show how to use
android.media.MediaRecorder.AudioSource.
Example #1
Source File: RecordEngine.java From semitone with GNU General Public License v3.0
public static void create(Activity a) {
    if (created) return;
    created = ContextCompat.checkSelfPermission(a, Manifest.permission.RECORD_AUDIO)
            == PackageManager.PERMISSION_GRANTED;
    if (!created) return;

    bufsize = AudioRecord.getMinBufferSize(SAMPLE_RATE,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    ar = new AudioRecord(AudioSource.MIC, SAMPLE_RATE,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufsize);
    DSP.init(bufsize);
    resume();
}
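This snippet acquires the microphone but never releases it. A minimal teardown counterpart, assuming the same static ar and created fields (the original file may handle this differently), could look like:

public static void destroy() {
    if (!created) return;
    ar.stop();     // stop capture before releasing native resources
    ar.release();  // free the microphone for other apps
    created = false;
}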
Example #2
Source File: AudioRecorder.java From react-native-google-nearby-connection with MIT License
public AudioRecord findAudioRecord() {
    for (int rate : AudioBuffer.POSSIBLE_SAMPLE_RATES) {
        for (short audioFormat : new short[] { AudioFormat.ENCODING_PCM_8BIT, AudioFormat.ENCODING_PCM_16BIT }) {
            for (short channelConfig : new short[] { AudioFormat.CHANNEL_IN_MONO, AudioFormat.CHANNEL_IN_STEREO }) {
                try {
                    Log.d(TAG, "Attempting rate " + rate + "Hz, bits: " + audioFormat
                            + ", channel: " + channelConfig);
                    int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
                    if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                        // Check whether we can instantiate the recorder successfully.
                        AudioRecord recorder = new AudioRecord(AudioSource.DEFAULT, rate,
                                channelConfig, audioFormat, bufferSize);
                        if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
                            return recorder;
                        }
                        recorder.release(); // avoid leaking a recorder that failed to initialize
                    }
                } catch (Exception e) {
                    Log.e(TAG, rate + " Exception, keep trying.", e);
                }
            }
        }
    }
    return null;
}
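A hypothetical caller for the probe above; findAudioRecord() returns null when no rate/format/channel combination initializes on the device:

AudioRecord recorder = findAudioRecord();
if (recorder != null) {
    recorder.startRecording();   // a working configuration was found
} else {
    Log.w(TAG, "no usable AudioRecord configuration on this device");
}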
Example #3
Source File: SpeechRecognizer.java From pocketsphinx-android with BSD 2-Clause "Simplified" License
/**
 * Creates speech recognizer. Recognizer holds the AudioRecord object, so you
 * need to call {@link release} in order to properly finalize it.
 *
 * @param config The configuration object
 * @throws IOException thrown if the audio recorder cannot be created for some reason.
 */
protected SpeechRecognizer(Config config) throws IOException {
    decoder = new Decoder(config);
    sampleRate = (int) decoder.getConfig().getFloat("-samprate");
    bufferSize = Math.round(sampleRate * BUFFER_SIZE_SECONDS);
    recorder = new AudioRecord(
            AudioSource.VOICE_RECOGNITION, sampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize * 2);

    if (recorder.getState() == AudioRecord.STATE_UNINITIALIZED) {
        recorder.release();
        throw new IOException(
                "Failed to initialize recorder. Microphone might be already in use.");
    }
}
Example #4
Source File: Compatibility.java From CSipSimple with GNU General Public License v3.0
public static String getDefaultMicroSource() {
    // Except for galaxy S II :(
    if (!isCompatible(11) && Build.DEVICE.toUpperCase().startsWith("GT-I9100")) {
        return Integer.toString(AudioSource.MIC);
    }
    if (isCompatible(10)) {
        // Note that in the APIs this is only available from level 11.
        // VOICE_COMMUNICATION
        return Integer.toString(0x7);
    }
    /*
     * Too risky in terms of regressions
     * else if (isCompatible(4)) {
     *     // VOICE_CALL
     *     return 0x4;
     * }
     */
    /*
     * if (android.os.Build.MODEL.equalsIgnoreCase("X10i")) {
     *     // VOICE_CALL
     *     return Integer.toString(0x4);
     * }
     */
    /*
     * Not relevant anymore, the Atrix I tested sounds fine with that
     * if (android.os.Build.DEVICE.equalsIgnoreCase("olympus")) {
     *     // Motorola Atrix bug
     *     // CAMCORDER
     *     return Integer.toString(0x5);
     * }
     */
    return Integer.toString(AudioSource.DEFAULT);
}
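Since the method returns the source as a string (presumably because it is stored as a string preference), a consumer would parse it back before constructing the recorder. A sketch under that assumption; the 8 kHz rate is illustrative, chosen because it is typical for SIP audio:

int micSource = Integer.parseInt(Compatibility.getDefaultMicroSource());
int minBuf = AudioRecord.getMinBufferSize(8000,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
AudioRecord record = new AudioRecord(micSource, 8000,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBuf);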
Example #5
Source File: AudioRecordingThread.java From AndroidRecording with Apache License 2.0
@Override
public void run() {
    FileOutputStream out = prepareWriting();
    if (out == null) { return; }

    AudioRecord record = new AudioRecord(AudioSource.VOICE_RECOGNITION, /* AudioSource.MIC */
            SAMPLING_RATE,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSize);
    record.startRecording();

    int read = 0;
    while (isRecording) {
        read = record.read(audioBuffer, 0, bufferSize);
        if ((read == AudioRecord.ERROR_INVALID_OPERATION) ||
            (read == AudioRecord.ERROR_BAD_VALUE) ||
            (read <= 0)) {
            continue;
        }
        proceed();
        write(out);
    }

    record.stop();
    record.release();

    finishWriting(out);
    convertRawToWav();
}
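convertRawToWav() is not shown in this excerpt. A minimal sketch of what such a conversion involves, assuming 16-bit mono PCM at the thread's sampling rate (the method and parameter names here are illustrative, not taken from the original project):

static void rawToWav(File rawFile, File wavFile, int sampleRate) throws IOException {
    byte[] pcm = new byte[(int) rawFile.length()];
    try (FileInputStream in = new FileInputStream(rawFile)) {
        int off = 0;
        while (off < pcm.length) {
            int n = in.read(pcm, off, pcm.length - off);
            if (n < 0) break;
            off += n;
        }
    }
    // 44-byte RIFF/WAVE header for 16-bit mono PCM.
    java.nio.ByteBuffer header = java.nio.ByteBuffer.allocate(44)
            .order(java.nio.ByteOrder.LITTLE_ENDIAN);
    header.put("RIFF".getBytes()).putInt(36 + pcm.length).put("WAVE".getBytes());
    header.put("fmt ".getBytes()).putInt(16)            // fmt chunk size
          .putShort((short) 1).putShort((short) 1)      // PCM encoding, 1 channel
          .putInt(sampleRate).putInt(sampleRate * 2)    // sample rate, byte rate
          .putShort((short) 2).putShort((short) 16);    // block align, bits per sample
    header.put("data".getBytes()).putInt(pcm.length);
    try (FileOutputStream out = new FileOutputStream(wavFile)) {
        out.write(header.array());
        out.write(pcm);
    }
}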
Example #6
Source File: AACHelper.java From CameraV with GNU General Public License v3.0
private int initAudioRecord(int rate) {
    try {
        Log.v("===========Attempting rate ", rate + "Hz, bits: " + audioFormat
                + ", channel: " + channelConfig);
        bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);

        if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
            // Check whether we can instantiate the recorder successfully.
            recorder = new AudioRecord(AudioSource.MIC, rate, channelConfig,
                    audioFormat, bufferSize);

            if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
                Log.v("===========final rate ", rate + "Hz, bits: " + audioFormat
                        + ", channel: " + channelConfig);
                return rate;
            }
        }
    } catch (Exception e) {
        Log.v("error", "" + rate);
    }
    return -1;
}
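A hypothetical caller: probe a list of common sample rates until one initializes (the actual rate list used in CameraV may differ):

int rate = -1;
for (int candidate : new int[] { 44100, 22050, 16000, 11025, 8000 }) {
    rate = initAudioRecord(candidate);
    if (rate != -1) break;   // the recorder field now holds a working instance
}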
Example #7
Source File: WebRtcAudioRecord.java From webrtc_android with MIT License
private static int getDefaultAudioSource() {
    return AudioSource.VOICE_COMMUNICATION;
}
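VOICE_COMMUNICATION (added in API level 11) asks the platform to tune the capture path for VoIP, for instance enabling echo cancellation and automatic gain control where available. A hedged variant guarding against older API levels, which is not necessarily how the WebRTC sources handle this, would be:

private static int getDefaultAudioSource() {
    return android.os.Build.VERSION.SDK_INT >= 11
            ? AudioSource.VOICE_COMMUNICATION
            : AudioSource.MIC;   // fallback for pre-Honeycomb devices
}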
Example #8
Source File: RNAudioRecordModule.java From react-native-audio-record with MIT License
@ReactMethod
public void init(ReadableMap options) {
    sampleRateInHz = 44100;
    if (options.hasKey("sampleRate")) {
        sampleRateInHz = options.getInt("sampleRate");
    }

    channelConfig = AudioFormat.CHANNEL_IN_MONO;
    if (options.hasKey("channels")) {
        if (options.getInt("channels") == 2) {
            channelConfig = AudioFormat.CHANNEL_IN_STEREO;
        }
    }

    audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    if (options.hasKey("bitsPerSample")) {
        if (options.getInt("bitsPerSample") == 8) {
            audioFormat = AudioFormat.ENCODING_PCM_8BIT;
        }
    }

    audioSource = AudioSource.VOICE_RECOGNITION;
    if (options.hasKey("audioSource")) {
        audioSource = options.getInt("audioSource");
    }

    String documentDirectoryPath = getReactApplicationContext().getFilesDir().getAbsolutePath();
    outFile = documentDirectoryPath + "/" + "audio.wav";
    tmpFile = documentDirectoryPath + "/" + "temp.pcm";
    if (options.hasKey("wavFile")) {
        String fileName = options.getString("wavFile");
        outFile = documentDirectoryPath + "/" + fileName;
    }

    isRecording = false;
    eventEmitter = reactContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter.class);

    bufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    int recordingBufferSize = bufferSize * 3;
    recorder = new AudioRecord(audioSource, sampleRateInHz, channelConfig,
            audioFormat, recordingBufferSize);
}
Example #9
Source File: EmbeddedAssistant.java From sample-googleassistant with Apache License 2.0
/**
 * Returns an AssistantManager if all required parameters have been supplied.
 *
 * @return An inactive AssistantManager. Call {@link EmbeddedAssistant#connect()} to start
 *         it.
 */
public EmbeddedAssistant build() {
    if (mEmbeddedAssistant.mRequestCallback == null) {
        throw new NullPointerException("There must be a defined RequestCallback");
    }
    if (mEmbeddedAssistant.mConversationCallback == null) {
        throw new NullPointerException("There must be a defined ConversationCallback");
    }
    if (mEmbeddedAssistant.mUserCredentials == null) {
        throw new NullPointerException("There must be provided credentials");
    }
    if (mSampleRate == 0) {
        throw new NullPointerException("There must be a defined sample rate");
    }
    final int audioEncoding = AudioFormat.ENCODING_PCM_16BIT;

    // Construct audio configurations.
    mEmbeddedAssistant.mAudioInConfig = AudioInConfig.newBuilder()
            .setEncoding(AudioInConfig.Encoding.LINEAR16)
            .setSampleRateHertz(mSampleRate)
            .build();
    mEmbeddedAssistant.mAudioOutConfig = AudioOutConfig.newBuilder()
            .setEncoding(AudioOutConfig.Encoding.LINEAR16)
            .setSampleRateHertz(mSampleRate)
            .setVolumePercentage(mEmbeddedAssistant.mVolume)
            .build();

    // Initialize Audio framework parameters.
    mEmbeddedAssistant.mAudioInputFormat = new AudioFormat.Builder()
            .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
            .setEncoding(audioEncoding)
            .setSampleRate(mSampleRate)
            .build();
    mEmbeddedAssistant.mAudioInputBufferSize = AudioRecord.getMinBufferSize(
            mEmbeddedAssistant.mAudioInputFormat.getSampleRate(),
            mEmbeddedAssistant.mAudioInputFormat.getChannelMask(),
            mEmbeddedAssistant.mAudioInputFormat.getEncoding());
    mEmbeddedAssistant.mAudioOutputFormat = new AudioFormat.Builder()
            .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
            .setEncoding(audioEncoding)
            .setSampleRate(mSampleRate)
            .build();
    mEmbeddedAssistant.mAudioOutputBufferSize = AudioTrack.getMinBufferSize(
            mEmbeddedAssistant.mAudioOutputFormat.getSampleRate(),
            mEmbeddedAssistant.mAudioOutputFormat.getChannelMask(),
            mEmbeddedAssistant.mAudioOutputFormat.getEncoding());

    // Create a new AudioRecord to work around audio routing issues.
    mEmbeddedAssistant.mAudioRecord = new AudioRecord.Builder()
            .setAudioSource(AudioSource.VOICE_RECOGNITION)
            .setAudioFormat(mEmbeddedAssistant.mAudioInputFormat)
            .setBufferSizeInBytes(mEmbeddedAssistant.mAudioInputBufferSize)
            .build();
    if (mEmbeddedAssistant.mAudioInputDevice != null) {
        boolean result = mEmbeddedAssistant.mAudioRecord.setPreferredDevice(
                mEmbeddedAssistant.mAudioInputDevice);
        if (!result) {
            Log.e(TAG, "failed to set preferred input device");
        }
    }

    // Construct DeviceConfig.
    mEmbeddedAssistant.mDeviceConfig = DeviceConfig.newBuilder()
            .setDeviceId(mDeviceInstanceId)
            .setDeviceModelId(mDeviceModelId)
            .build();

    // Construct default ScreenOutConfig.
    mEmbeddedAssistant.mScreenOutConfig = ScreenOutConfig.newBuilder()
            .setScreenMode(ScreenOutConfig.ScreenMode.SCREEN_MODE_UNSPECIFIED)
            .build();

    return mEmbeddedAssistant;
}
Example #10
Source File: MainActivity.java From android-fskmodem with GNU General Public License v3.0
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);

    /// INIT FSK CONFIG
    try {
        mConfig = new FSKConfig(FSKConfig.SAMPLE_RATE_44100, FSKConfig.PCM_16BIT,
                FSKConfig.CHANNELS_MONO, FSKConfig.SOFT_MODEM_MODE_4,
                FSKConfig.THRESHOLD_20P);
    } catch (IOException e1) {
        e1.printStackTrace();
    }

    /// INIT FSK DECODER
    mDecoder = new FSKDecoder(mConfig, new FSKDecoderCallback() {
        @Override
        public void decoded(byte[] newData) {
            final String text = new String(newData);
            runOnUiThread(new Runnable() {
                public void run() {
                    TextView view = ((TextView) findViewById(R.id.result));
                    view.setText(view.getText() + text);
                }
            });
        }
    });
    ///

    // Make sure that the settings of the recorder match the settings of the decoder.
    // Most devices can't record anything but 44100 samples in 16-bit PCM format...
    mBufferSize = AudioRecord.getMinBufferSize(FSKConfig.SAMPLE_RATE_44100,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);

    // Scale up the buffer: reading larger amounts of data
    // minimizes the chance of missing data because of thread priority.
    mBufferSize *= 10;

    // Again, make sure the recorder settings match the decoder settings.
    mRecorder = new AudioRecord(AudioSource.MIC, FSKConfig.SAMPLE_RATE_44100,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, mBufferSize);

    if (mRecorder.getState() == AudioRecord.STATE_INITIALIZED) {
        mRecorder.startRecording();

        // Start a thread to read the audio data.
        Thread thread = new Thread(mRecordFeed);
        thread.setPriority(Thread.MAX_PRIORITY);
        thread.start();
    } else {
        Log.i("FSKDecoder", "Please check the recorder settings, something is wrong!");
    }
}
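mRecordFeed is not shown in this excerpt. A plausible sketch of such a feeder, assuming the fields above; the decoder entry point appendSignal is an assumption about the FSKDecoder API, not confirmed by this excerpt:

private final Runnable mRecordFeed = new Runnable() {
    @Override
    public void run() {
        short[] data = new short[mBufferSize / 2];   // 16-bit PCM samples
        while (mRecorder.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
            int read = mRecorder.read(data, 0, data.length);
            if (read > 0) {
                mDecoder.appendSignal(data);         // hypothetical decoder input call
            }
        }
    }
};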