Python pyaudio.PyAudio() Examples
The following are 30 code examples of pyaudio.PyAudio().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module pyaudio, or try the search function.
Example #1
Source File: proj2_yt_mvp.py From ai-makers-kit with MIT License | 13 votes |
def __enter__(self):
    """Open the microphone stream and mark this context as active."""
    self._audio_interface = pyaudio.PyAudio()
    # The stream fills the buffer asynchronously via the callback, so
    # the input device's buffer doesn't overflow while the calling
    # thread makes network requests, etc.
    self._audio_stream = self._audio_interface.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self
Example #2
Source File: ex1_kwstest.py From ai-makers-kit with MIT License | 12 votes |
def play_file(fname):
    """Play a WAV file through the default audio output device.

    Arguments:
        fname -- path of the wave file to play
    """
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # play stream (looping from beginning of file to the end)
        while len(data) > 0:
            # writing to the stream is what *actually* plays the sound.
            stream.write(data)
            data = wf.readframes(chunk)
        # drain and release the stream before terminating PortAudio.
        stream.stop_stream()
        stream.close()
    finally:
        # cleanup stuff: always release PortAudio and the file handle,
        # even when opening the stream or writing raises.
        p.terminate()
        wf.close()
Example #3
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 12 votes |
def play_file(fname):
    """Play a WAV file through the default audio output device.

    Arguments:
        fname -- path of the wave file to play
    """
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # play stream (looping from beginning of file to the end)
        while len(data) > 0:
            # writing to the stream is what *actually* plays the sound.
            stream.write(data)
            data = wf.readframes(chunk)
        # drain and release the stream before terminating PortAudio.
        stream.stop_stream()
        stream.close()
    finally:
        # cleanup stuff: always release PortAudio and the file handle,
        # even when opening the stream or writing raises.
        p.terminate()
        wf.close()
Example #4
Source File: mic.py From laibot-client with MIT License | 11 votes |
def __init__(self, speaker, passive_stt_engine, active_stt_engine):
    """
    Initiates the pocketsphinx instance.

    Arguments:
    speaker -- handles platform-independent audio output
    passive_stt_engine -- performs STT while Jasper is in passive listen mode
    active_stt_engine -- performs STT while Jasper is in active listen mode
    """
    self._logger = logging.getLogger(__name__)
    self.speaker = speaker
    self.passive_stt_engine = passive_stt_engine
    self.active_stt_engine = active_stt_engine
    # PortAudio probing may emit ALSA/Jack warnings; warn the user that
    # these are expected and harmless before creating the instance.
    self._logger.info("Initializing PyAudio. ALSA/Jack error messages " +
                      "that pop up during this process are normal and " +
                      "can usually be safely ignored.")
    self._audio = pyaudio.PyAudio()
    self._logger.info("Initialization of PyAudio completed.")
Example #5
Source File: vad.py From audio with BSD 2-Clause "Simplified" License | 11 votes |
def __enter__(self):
    """Open a mono float32 microphone stream on the configured device."""
    self._audio_interface = pyaudio.PyAudio()
    # The callback fills the buffer asynchronously so the input
    # device's buffer doesn't overflow while the calling thread makes
    # network requests, etc.
    self._audio_stream = self._audio_interface.open(
        # format=pyaudio.paInt16,
        format=pyaudio.paFloat32,
        # The API currently only supports 1-channel (mono) audio
        # https://goo.gl/z757pE
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        input_device_index=self._device,
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self
Example #6
Source File: snowboydecoder.py From google-assistant-hotword-raspi with MIT License | 11 votes |
def play_audio_file(fname=DETECT_DING):
    """Simple callback function to play a wave file. By default it plays
    a Ding sound.

    :param str fname: wave file name
    :return: None
    """
    ding_wav = wave.open(fname, 'rb')
    try:
        # Capture the format parameters and the whole payload up front
        # so the file handle can be released immediately (the original
        # never closed it).
        sample_width = ding_wav.getsampwidth()
        n_channels = ding_wav.getnchannels()
        frame_rate = ding_wav.getframerate()
        ding_data = ding_wav.readframes(ding_wav.getnframes())
    finally:
        ding_wav.close()
    audio = pyaudio.PyAudio()
    stream_out = audio.open(
        format=audio.get_format_from_width(sample_width),
        channels=n_channels,
        rate=frame_rate,
        input=False, output=True)
    stream_out.start_stream()
    stream_out.write(ding_data)
    # Give the output buffer time to drain before tearing down.
    time.sleep(0.2)
    stream_out.stop_stream()
    stream_out.close()
    audio.terminate()
Example #7
Source File: google_client.py From dialogflow_ros with MIT License | 9 votes |
def __init__(self):
    """Set up the microphone stream, the ROS text publisher and the
    Dialogflow context clues loaded from the package's YAML config."""
    # Audio stream input setup
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    self.CHUNK = 4096
    self.audio = pyaudio.PyAudio()
    self.stream = self.audio.open(format=FORMAT, channels=CHANNELS,
                                  rate=RATE, input=True,
                                  frames_per_buffer=self.CHUNK,
                                  stream_callback=self.get_data)
    self._buff = Queue.Queue()  # Buffer to hold audio data
    self.closed = False
    # ROS Text Publisher
    self.text_pub = rospy.Publisher('/google_client/text', String, queue_size=10)
    # Context clues in yaml file
    rospack = rospkg.RosPack()
    yamlFileDir = rospack.get_path('dialogflow_ros') + '/config/context.yaml'
    with open(yamlFileDir, 'r') as f:
        # yaml.load() without an explicit Loader is deprecated and can
        # execute arbitrary constructors; safe_load only builds plain
        # YAML types, which is all a context config needs.
        self.context = yaml.safe_load(f)
Example #8
Source File: SWHear.py From Python-GUI-examples with MIT License | 9 votes |
def valid_input_devices(self):
    """See which devices can be opened for microphone input.

    call this when no PyAudio object is loaded.
    """
    # Probe every device index and keep the ones that pass the test.
    mics = [device for device in range(self.p.get_device_count())
            if self.valid_test(device)]
    if len(mics) == 0:
        print("no microphone devices found!")
    else:
        print("found %d microphone devices: %s" % (len(mics), mics))
    return mics

### SETUP AND SHUTDOWN
Example #9
Source File: _audio.py From ai-makers-kit with MIT License | 8 votes |
def play_wav(fname, chunk=CHUNK):
    """Play a WAV file through the default audio output device.

    Arguments:
        fname -- path of the wave file to play
        chunk -- number of frames read and written per iteration
    """
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # play stream (looping from beginning of file to the end)
        while len(data) > 0:
            # writing to the stream is what *actually* plays the sound.
            stream.write(data)
            data = wf.readframes(chunk)
        # drain and release the stream before terminating PortAudio.
        stream.stop_stream()
        stream.close()
    finally:
        # cleanup stuff: always release PortAudio and the file handle,
        # even when opening the stream or writing raises.
        p.terminate()
        wf.close()
Example #10
Source File: proj3_capital_game.py From ai-makers-kit with MIT License | 7 votes |
def __enter__(self):
    """Open the microphone stream and mark this context as active."""
    self._audio_interface = pyaudio.PyAudio()
    # The stream fills the buffer asynchronously via the callback, so
    # the input device's buffer doesn't overflow while the calling
    # thread makes network requests, etc.
    self._audio_stream = self._audio_interface.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self
Example #11
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 7 votes |
def play_file(fname):
    """Play a WAV file through the default audio output device.

    Arguments:
        fname -- path of the wave file to play
    """
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # play stream (looping from beginning of file to the end)
        while len(data) > 0:
            # writing to the stream is what *actually* plays the sound.
            stream.write(data)
            data = wf.readframes(chunk)
        # drain and release the stream before terminating PortAudio.
        stream.stop_stream()
        stream.close()
    finally:
        # cleanup stuff: always release PortAudio and the file handle,
        # even when opening the stream or writing raises.
        p.terminate()
        wf.close()
Example #12
Source File: _audio.py From ai-makers-kit with MIT License | 7 votes |
def play_wav(fname, chunk=CHUNK):
    """Play a WAV file through the default audio output device.

    Arguments:
        fname -- path of the wave file to play
        chunk -- number of frames read and written per iteration
    """
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # play stream (looping from beginning of file to the end)
        while len(data) > 0:
            # writing to the stream is what *actually* plays the sound.
            stream.write(data)
            data = wf.readframes(chunk)
        # drain and release the stream before terminating PortAudio.
        stream.stop_stream()
        stream.close()
    finally:
        # cleanup stuff: always release PortAudio and the file handle,
        # even when opening the stream or writing raises.
        p.terminate()
        wf.close()
Example #13
Source File: snowboydecoder.py From dialogflow_ros with MIT License | 6 votes |
def play_audio_file(fname=DETECT_DING):
    """Simple callback function to play a wave file. By default it plays
    a Ding sound.

    :param str fname: wave file name
    :return: None
    """
    ding_wav = wave.open(fname, 'rb')
    try:
        # Capture the format parameters and the whole payload up front
        # so the file handle can be released immediately (the original
        # never closed it).
        sample_width = ding_wav.getsampwidth()
        n_channels = ding_wav.getnchannels()
        frame_rate = ding_wav.getframerate()
        ding_data = ding_wav.readframes(ding_wav.getnframes())
    finally:
        ding_wav.close()
    audio = pyaudio.PyAudio()
    stream_out = audio.open(
        format=audio.get_format_from_width(sample_width),
        channels=n_channels,
        rate=frame_rate,
        input=False, output=True)
    stream_out.start_stream()
    stream_out.write(ding_data)
    # Give the output buffer time to drain before tearing down.
    time.sleep(0.2)
    stream_out.stop_stream()
    stream_out.close()
    audio.terminate()
Example #14
Source File: action_playsound.py From dragonfly with GNU Lesser General Public License v3.0 | 6 votes |
def _get_pa_instance():
    """Create and return a PyAudio instance with ALSA warnings muted.

    Temporarily installs a no-op libasound error handler so the noisy
    initial ALSA messages are suppressed, then restores the default.
    Got this from: https://stackoverflow.com/a/17673011/12157649
    """
    try:
        asound = cdll.LoadLibrary('libasound.so')
        c_error_handler = ERROR_HANDLER_FUNC(
            lambda filename, line, function, err, fmt: None
        )
        asound.snd_lib_error_set_handler(c_error_handler)
    except (OSError, AttributeError):
        # We'll most likely get here if the Port Audio host API isn't
        # ALSA (libasound missing or lacking the symbol). The original
        # bare `except:` would also have trapped KeyboardInterrupt and
        # SystemExit, which should propagate.
        asound = None

    # Create the pa instance.
    pa = pyaudio.PyAudio()

    # If necessary, restore the original error handler.
    if asound:
        asound.snd_lib_error_set_handler(None)
    return pa
Example #15
Source File: main.py From HanTTS with MIT License | 6 votes |
def _play_audio(path, delay):
    """Best-effort playback of the wave file at *path* after *delay* seconds.

    Any playback failure is swallowed deliberately: this runs on a
    background thread where an exception would be unhelpful.
    """
    try:
        time.sleep(delay)
        wf = wave.open(path, 'rb')
        p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        data = wf.readframes(TextToSpeech.CHUNK)
        while data:
            stream.write(data)
            data = wf.readframes(TextToSpeech.CHUNK)
        stream.stop_stream()
        stream.close()
        p.terminate()
        # The original leaked the wave file handle.
        wf.close()
        return
    except Exception:
        # Narrowed from a bare `except:`, which would also have
        # swallowed SystemExit and KeyboardInterrupt.
        pass
Example #16
Source File: smart_trash_can.py From ai-makers-kit with MIT License | 6 votes |
def __enter__(self):
    """Open the microphone stream and mark this context as active."""
    self._audio_interface = pyaudio.PyAudio()
    # The stream fills the buffer asynchronously via the callback, so
    # the input device's buffer doesn't overflow while the calling
    # thread makes network requests, etc.
    self._audio_stream = self._audio_interface.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self
Example #17
Source File: proj2_yt_mvp.py From ai-makers-kit with MIT License | 6 votes |
def __enter__(self):
    """Open the microphone stream and mark this context as active."""
    self._audio_interface = pyaudio.PyAudio()
    # The stream fills the buffer asynchronously via the callback, so
    # the input device's buffer doesn't overflow while the calling
    # thread makes network requests, etc.
    self._audio_stream = self._audio_interface.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self
Example #18
Source File: speech_recognizer.py From libfaceid with MIT License | 6 votes |
def __init__(self, model):
    """Locate the registered microphone and prepare speech recognition.

    Falls back to the default microphone when the registered one is not
    listed, and to None when no microphone can be opened at all.
    """
    import speech_recognition # lazy loading
    import pyaudio
    # NOTE(review): this PyAudio instance is never terminated — it is
    # only used to count devices; presumably acceptable for a one-shot
    # init, but worth confirming.
    device_count = pyaudio.PyAudio().get_device_count() - 1
    print("\ndevice_count: " + str(device_count))
    self._r = speech_recognition.Recognizer()
    try:
        mics = speech_recognition.Microphone.list_microphone_names()
        print("mics: " + str(mics))
        index = mics.index(registered_mic)
    except ValueError:
        # registered_mic is not among the enumerated device names.
        index = -1
    except Exception:
        # Device enumeration itself failed; narrowed from a bare
        # `except:` which would also trap SystemExit/KeyboardInterrupt.
        index = -1
    print("index: " + str(index))
    try:
        if index == -1:
            self._m = speech_recognition.Microphone()
        else:
            self._m = speech_recognition.Microphone(device_index=index)
    except Exception:
        self._m = None
        print("SpeechRecognizer_Common, no mic detected!")
    self._model = model
Example #19
Source File: core_recorder.py From rcaudio with MIT License | 6 votes |
def run(self):
    """Record from the default input device into ``self.buffer``.

    Reads batches of ``self.batch_num`` frames until the configured
    recording time elapses or the running flag is cleared.
    """
    self.logger.debug("Start to recording...")
    self.logger.debug(" Time = %s"%self.time)
    self.logger.debug(" Sample Rate = %s"%self.sr)
    self.start_time = time.time()
    pa = PyAudio()
    stream = pa.open(format=paInt16, channels=1,
                     rate=self.sr, input=True,
                     frames_per_buffer=self.frames_per_buffer)
    count = 0
    # Effectively "forever" when no recording time was configured.
    if self.time is None:
        total_count = 1e10
    else:
        total_count = self.time * self.sr / self.batch_num
    while count < total_count and self.__running.isSet():
        datawav = stream.read(self.batch_num, exception_on_overflow=True)
        # np.fromstring is deprecated for binary input; frombuffer is
        # the supported equivalent (and avoids a copy).
        datause = np.frombuffer(datawav, dtype=np.short)
        for w in datause:
            self.buffer.put(w)
        count += 1
    stream.close()
    # The original leaked the PortAudio handle (removed the unused
    # my_buf list as well).
    pa.terminate()
Example #20
Source File: core_recorder.py From rcaudio with MIT License | 6 votes |
def run(self):
    """Record from the default input device into ``self.buffer``.

    Reads batches of ``self.batch_num`` frames until the configured
    recording time elapses or the running flag is cleared.
    """
    self.logger.debug("Start to recording...")
    self.logger.debug(" Time = %s"%self.time)
    self.logger.debug(" Sample Rate = %s"%self.sr)
    self.start_time = time.time()
    pa = PyAudio()
    stream = pa.open(format=paInt16, channels=1,
                     rate=self.sr, input=True,
                     frames_per_buffer=self.frames_per_buffer)
    count = 0
    # Effectively "forever" when no recording time was configured.
    if self.time is None:
        total_count = 1e10
    else:
        total_count = self.time * self.sr / self.batch_num
    while count < total_count and self.__running.isSet():
        datawav = stream.read(self.batch_num, exception_on_overflow=True)
        # np.fromstring is deprecated for binary input; frombuffer is
        # the supported equivalent (and avoids a copy).
        datause = np.frombuffer(datawav, dtype=np.short)
        for w in datause:
            self.buffer.put(w)
        count += 1
    stream.close()
    # The original leaked the PortAudio handle (removed the unused
    # my_buf list as well).
    pa.terminate()
Example #21
Source File: AVrecordeR.py From AVrecordeR with GNU General Public License v2.0 | 6 votes |
def __init__(self):
    """Open a 44.1 kHz, 16-bit stereo PyAudio input stream for recording."""
    self.open = True
    self.rate = 44100
    self.frames_per_buffer = 1024
    self.channels = 2
    self.format = pyaudio.paInt16
    self.audio_filename = "temp_audio.wav"
    self.audio = pyaudio.PyAudio()
    self.stream = self.audio.open(format=self.format,
                                  channels=self.channels,
                                  rate=self.rate,
                                  input=True,
                                  frames_per_buffer=self.frames_per_buffer)
    # Captured buffers accumulate here once recording starts.
    self.audio_frames = []

# Audio starts being recorded
Example #22
Source File: utils.py From voicetools with Apache License 2.0 | 6 votes |
def play(self, file_):
    """Play a WAV file through the default audio output device.

    Arguments:
        file_ -- path of the wave file to play
    """
    wf = wave.open(file_, 'rb')
    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    data = wf.readframes(self.CHUNK)
    # BUG FIX: the original tested `data != ''`, but readframes returns
    # bytes on Python 3, so b'' != '' stays True and the loop never
    # terminated. Truthiness handles both str and bytes correctly.
    while data:
        stream.write(data)
        data = wf.readframes(self.CHUNK)
    stream.stop_stream()
    stream.close()
    p.terminate()
    wf.close()  # the original leaked the wave file handle
Example #23
Source File: MicrophoneStream.py From ai-makers-kit with MIT License | 6 votes |
def play_file(fname):
    """Play a WAV file through the default audio output device.

    Arguments:
        fname -- path of the wave file to play
    """
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # play stream (looping from beginning of file to the end)
        while len(data) > 0:
            # writing to the stream is what *actually* plays the sound.
            stream.write(data)
            data = wf.readframes(chunk)
        # drain and release the stream before terminating PortAudio.
        stream.stop_stream()
        stream.close()
    finally:
        # cleanup stuff: always release PortAudio and the file handle,
        # even when opening the stream or writing raises.
        p.terminate()
        wf.close()
Example #24
Source File: mqtt.py From hermes-audio-server with MIT License | 6 votes |
def __init__(self, config, verbose, logger):
    """Initialize an MQTT client.

    Args:
        config (:class:`.ServerConfig`): The configuration of the MQTT
            client.
        verbose (bool): Whether or not the MQTT client runs in verbose
            mode.
        logger (:class:`logging.Logger`): The Logger object for logging
            messages.
    """
    self.config = config
    self.verbose = verbose
    self.logger = logger
    self.mqtt = Client()
    self.logger.debug('Using %s', pyaudio.get_portaudio_version_text())
    self.logger.debug('Creating PyAudio object...')
    self.audio = pyaudio.PyAudio()
    # Subclass-specific setup runs before the MQTT callbacks are wired
    # up and the connection is established.
    self.initialize()
    self.mqtt.on_connect = self.on_connect
    self.mqtt.on_disconnect = self.on_disconnect
    self.connect()
Example #25
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 6 votes |
def play_file(fname):
    """Play a WAV file through the default audio output device.

    Arguments:
        fname -- path of the wave file to play
    """
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # play stream (looping from beginning of file to the end)
        while len(data) > 0:
            # writing to the stream is what *actually* plays the sound.
            stream.write(data)
            data = wf.readframes(chunk)
        # drain and release the stream before terminating PortAudio.
        stream.stop_stream()
        stream.close()
    finally:
        # cleanup stuff: always release PortAudio and the file handle,
        # even when opening the stream or writing raises.
        p.terminate()
        wf.close()
Example #26
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 6 votes |
def play_file(fname):
    """Play a WAV file through the default audio output device.

    Arguments:
        fname -- path of the wave file to play
    """
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # play stream (looping from beginning of file to the end)
        while len(data) > 0:
            # writing to the stream is what *actually* plays the sound.
            stream.write(data)
            data = wf.readframes(chunk)
        # drain and release the stream before terminating PortAudio.
        stream.stop_stream()
        stream.close()
    finally:
        # cleanup stuff: always release PortAudio and the file handle,
        # even when opening the stream or writing raises.
        p.terminate()
        wf.close()
Example #27
Source File: recording.py From dragonfly with GNU Lesser General Public License v3.0 | 6 votes |
def _record(self):
    """Record audio on the current thread until stop() is called.

    Appends raw buffers to ``self._buffers`` while holding
    ``self._condition`` and wakes any waiting consumer threads after
    each read.
    """
    p = pyaudio.PyAudio()
    channels, rate = self.config.CHANNELS, self.config.RATE
    frames_per_buffer = self.config.FRAMES_PER_BUFFER
    pa_format = pyaudio.get_format_from_width(self.config.SAMPLE_WIDTH)
    stream = p.open(input=True, format=pa_format,
                    channels=channels, rate=rate,
                    frames_per_buffer=frames_per_buffer)

    # Start recognising in a loop
    stream.start_stream()
    while self._recording:
        with self._condition:
            self._buffers.append(stream.read(frames_per_buffer))

            # Notify waiting threads (if any). notifyAll() is a
            # deprecated alias of notify_all().
            self._condition.notify_all()

        # This improves the performance; we don't need to process as
        # much audio as the device can read.
        time.sleep(self.read_interval)

    # Stop the stream before closing so PortAudio tears down cleanly
    # (the original closed a still-active stream).
    stream.stop_stream()
    stream.close()
    p.terminate()
Example #28
Source File: hotword_dialogflow.py From dialogflow_ros with MIT License | 6 votes |
def __init__(self):
    """Load the hotword model and prepare the Ding playback stream."""
    self.interrupted = False
    self.detector = None
    # UMDL or PMDL file paths along with audio files
    rospack = RosPack()
    package_dir = rospack.get_path('dialogflow_ros')
    self.model_path = package_dir + '/scripts/snowboy/resources/jarvis.umdl'
    chime_path = package_dir + '/scripts/snowboy/resources/ding.wav'
    # Setup df
    self.df_client = None
    # Setup audio output: preload the Ding payload and open a matching
    # output stream so detection feedback plays with no file I/O.
    chime_wav = wave.open(chime_path, 'rb')
    self.ding_data = chime_wav.readframes(chime_wav.getnframes())
    self.audio = pyaudio.PyAudio()
    self.stream_out = self.audio.open(
        format=self.audio.get_format_from_width(chime_wav.getsampwidth()),
        channels=chime_wav.getnchannels(),
        rate=chime_wav.getframerate(),
        input=False, output=True)
    self.last_contexts = []
    rospy.loginfo("HOTWORD_CLIENT: Ready!")
Example #29
Source File: audio_server.py From dialogflow_ros with MIT License | 6 votes |
def __init__(self):
    """Open the microphone stream and a TCP server socket for audio."""
    audio_format = pyaudio.paInt16
    num_channels = 1
    sample_rate = 16000
    frames_per_chunk = 4096
    self.audio = pyaudio.PyAudio()
    self.stream = self.audio.open(format=audio_format,
                                  channels=num_channels,
                                  rate=sample_rate,
                                  input=True,
                                  frames_per_buffer=frames_per_chunk,
                                  stream_callback=self._callback)
    # The server socket is polled via self.read_list (select-style).
    self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.read_list = [self.serversocket]
    self._server_name = rospy.get_param('/dialogflow_client/server_name',
                                        '127.0.0.1')
    self._port = rospy.get_param('/dialogflow_client/port', 4444)
    rospy.loginfo("DF_CLIENT: Audio Server Started!")
Example #30
Source File: reader_microphone.py From audio-fingerprint-identifying-python with MIT License | 5 votes |
def __init__(self, a):
    """Create a microphone reader preset with the class default settings."""
    super(MicrophoneReader, self).__init__(a)
    self.audio = pyaudio.PyAudio()
    # No stream or captured data until recording begins.
    self.stream = None
    self.data = []
    self.recorded = False
    self.channels = MicrophoneReader.default_channels
    self.chunksize = MicrophoneReader.default_chunksize
    self.rate = MicrophoneReader.default_rate