Python os.read() Examples
The following are 30 code examples of os.read(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module os, or try the search function.
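Before the project examples, here is a minimal, self-contained sketch of how os.read() behaves (not taken from any of the projects below): it reads at most n bytes from a raw file descriptor and returns a bytes object, with an empty result signalling end-of-file.

import os

# Minimal sketch: read from one end of a pipe until EOF.
read_fd, write_fd = os.pipe()
os.write(write_fd, b"hello")
os.close(write_fd)          # closing the write end lets the reader see EOF

chunks = []
while True:
    chunk = os.read(read_fd, 4)   # read at most 4 bytes per call
    if not chunk:                 # b"" means end-of-file
        break
    chunks.append(chunk)
os.close(read_fd)

print(b"".join(chunks))           # b'hello'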
Example #1
Source File: iostream.py From tornado-zh with MIT License | 6 votes |
def read_from_fd(self):
    try:
        chunk = os.read(self.fd, self.read_chunk_size)
    except (IOError, OSError) as e:
        if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
            return None
        elif errno_from_exception(e) == errno.EBADF:
            # If the writing half of a pipe is closed, select will
            # report it as readable but reads will fail with EBADF.
            self.close(exc_info=True)
            return None
        else:
            raise
    if not chunk:
        self.close()
        return None
    return chunk
Example #2
Source File: tarfile.py From jawfish with MIT License | 6 votes |
def read(self, size=None):
    """Return the next size number of bytes from the stream.
       If size is not defined, return all bytes of the stream
       up to EOF.
    """
    if size is None:
        t = []
        while True:
            buf = self._read(self.bufsize)
            if not buf:
                break
            t.append(buf)
        buf = "".join(t)
    else:
        buf = self._read(size)

    self.pos += len(buf)
    return buf
Example #3
Source File: test_capture.py From py with MIT License | 6 votes |
def test_dupfile(tmpfile):
    flist = []
    for i in range(5):
        nf = py.io.dupfile(tmpfile, encoding="utf-8")
        assert nf != tmpfile
        assert nf.fileno() != tmpfile.fileno()
        assert nf not in flist
        print_(i, end="", file=nf)
        flist.append(nf)
    for i in range(5):
        f = flist[i]
        f.close()
    tmpfile.seek(0)
    s = tmpfile.read()
    assert "01234" in repr(s)
    tmpfile.close()
Example #4
Source File: tarfile.py From jawfish with MIT License | 6 votes |
def copyfileobj(src, dst, length=None):
    """Copy length bytes from fileobj src to fileobj dst.
       If length is None, copy the entire content.
    """
    if length == 0:
        return
    if length is None:
        shutil.copyfileobj(src, dst)
        return

    BUFSIZE = 16 * 1024
    blocks, remainder = divmod(length, BUFSIZE)
    for b in range(blocks):
        buf = src.read(BUFSIZE)
        if len(buf) < BUFSIZE:
            raise IOError("end of file reached")
        dst.write(buf)

    if remainder != 0:
        buf = src.read(remainder)
        if len(buf) < remainder:
            raise IOError("end of file reached")
        dst.write(buf)
    return
Example #5
Source File: tarfile.py From jawfish with MIT License | 6 votes |
def addfile(self, tarinfo, fileobj=None):
    """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
       given, tarinfo.size bytes are read from it and added to the archive.
       You can create TarInfo objects using gettarinfo().
       On Windows platforms, `fileobj' should always be opened with mode
       'rb' to avoid irritation about the file size.
    """
    self._check("aw")

    tarinfo = copy.copy(tarinfo)

    buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
    self.fileobj.write(buf)
    self.offset += len(buf)

    # If there's data to follow, append it.
    if fileobj is not None:
        copyfileobj(fileobj, self.fileobj, tarinfo.size)
        blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
        if remainder > 0:
            self.fileobj.write(NUL * (BLOCKSIZE - remainder))
            blocks += 1
        self.offset += blocks * BLOCKSIZE

    self.members.append(tarinfo)
Example #6
Source File: crashsignal.py From qutebrowser with GNU General Public License v3.0 | 6 votes |
def handle_signal_wakeup(self):
    """Handle a newly arrived signal.

    This gets called via self._notifier when there's a signal.

    Python will get control here, so the signal will get handled.
    """
    assert self._notifier is not None
    log.destroy.debug("Handling signal wakeup!")
    self._notifier.setEnabled(False)
    read_fd = self._notifier.socket()
    try:
        os.read(int(read_fd), 1)
    except OSError:
        log.destroy.exception("Failed to read wakeup fd.")

    self._notifier.setEnabled(True)
Example #7
Source File: crashsignal.py From qutebrowser with GNU General Public License v3.0 | 6 votes |
def init_faulthandler(self):
    """Handle a segfault from a previous run and set up faulthandler."""
    logname = os.path.join(standarddir.data(), 'crash.log')
    try:
        # First check if an old logfile exists.
        if os.path.exists(logname):
            with open(logname, 'r', encoding='ascii') as f:
                self._crash_log_data = f.read()
            os.remove(logname)
            self._init_crashlogfile()
        else:
            # There's no log file, so we can use this to display crashes to
            # the user on the next start.
            self._init_crashlogfile()
    except OSError:
        log.init.exception("Error while handling crash log file!")
        self._init_crashlogfile()
Example #8
Source File: _Cosimulation.py From myhdl with GNU Lesser General Public License v2.1 | 6 votes |
def _get(self):
    if not self._getMode:
        return
    buf = os.read(self._rt, _MAXLINE).decode()
    if not buf:
        raise CosimulationError(_error.SimulationEnd)
    e = buf.split()
    for i in range(1, len(e), 2):
        s, v = self._toSigDict[e[i]], e[i + 1]
        if v in 'zZ':
            next = None
        elif v in 'xX':
            next = s._init
        else:
            try:
                next = int(v, 16)
                if s._nrbits and s._min is not None and s._min < 0:
                    if next >= (1 << (s._nrbits - 1)):
                        next |= (-1 << s._nrbits)
            except ValueError:
                next = intbv(0)
        s.next = next

    self._getMode = 0
Example #9
Source File: iostream.py From tornado-zh with MIT License | 6 votes |
def read_from_fd(self):
    try:
        chunk = os.read(self.fd, self.read_chunk_size)
    except (IOError, OSError) as e:
        if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
            return None
        elif errno_from_exception(e) == errno.EBADF:
            # If the writing half of a pipe is closed, select will
            # report it as readable but reads will fail with EBADF.
            self.close(exc_info=True)
            return None
        else:
            raise
    if not chunk:
        self.close()
        return None
    return chunk
Example #10
Source File: tarfile.py From jawfish with MIT License | 6 votes |
def __next__(self):
    """Return the next item using TarFile's next() method.
       When all members have been read, set TarFile as _loaded.
    """
    # Fix for SF #1100429: Under rare circumstances it can
    # happen that getmembers() is called during iteration,
    # which will cause TarIter to stop prematurely.
    if self.index == 0 and self.tarfile.firstmember is not None:
        tarinfo = self.tarfile.next()
    elif self.index < len(self.tarfile.members):
        tarinfo = self.tarfile.members[self.index]
    elif not self.tarfile._loaded:
        tarinfo = self.tarfile.next()
        if not tarinfo:
            self.tarfile._loaded = True
            raise StopIteration
    else:
        raise StopIteration
    self.index += 1
    return tarinfo

#--------------------
# exported functions
#--------------------
Example #11
Source File: tarfile.py From jawfish with MIT License | 6 votes |
def fromtarfile(cls, tarfile):
    """Return the next TarInfo object from TarFile object
       tarfile.
    """
    buf = tarfile.fileobj.read(BLOCKSIZE)
    obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
    obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
    return obj._proc_member(tarfile)

#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following operations:
# 1. Set self.offset_data to the position where the data blocks begin,
#    if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
#    begin.
# 3. Return self or another valid TarInfo object.
Example #12
Source File: linux_pty.py From TerminalView with MIT License | 5 votes |
def receive_output(self, max_read_size, timeout=0):
    """
    Poll the shell output
    """
    (ready, _, _) = select.select([self._master_fd], [], [], timeout)
    if not ready:
        return None

    try:
        data = os.read(self._master_fd, max_read_size)
    except OSError:
        return None

    return data
Example #13
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def _handle_read(self):
    try:
        pos = self._read_to_buffer_loop()
    except UnsatisfiableReadError:
        raise
    except Exception as e:
        gen_log.warning("error on read: %s" % e)
        self.close(exc_info=True)
        return
    if pos is not None:
        self._read_from_buffer(pos)
        return
    else:
        self._maybe_run_close_callback()
Example #14
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def _try_inline_read(self):
    """Attempt to complete the current read operation from buffered data.

    If the read can be completed without blocking, schedules the
    read callback on the next IOLoop iteration; otherwise starts
    listening for reads on the socket.
    """
    # See if we've already got the data from a previous read
    self._run_streaming_callback()
    pos = self._find_read_pos()
    if pos is not None:
        self._read_from_buffer(pos)
        return
    self._check_closed()
    try:
        pos = self._read_to_buffer_loop()
    except Exception:
        # If there was an exception in _read_to_buffer, we called close()
        # already, but couldn't run the close callback because of
        # _pending_callbacks.  Before we escape from this function, run the
        # close callback if applicable.
        self._maybe_run_close_callback()
        raise
    if pos is not None:
        self._read_from_buffer(pos)
        return
    # We couldn't satisfy the read inline, so either close the stream
    # or listen for new data.
    if self.closed():
        self._maybe_run_close_callback()
    else:
        self._add_io_state(ioloop.IOLoop.READ)
Example #15
Source File: tarfile.py From recruit with Apache License 2.0 | 5 votes |
def copyfileobj(src, dst, length=None):
    """Copy length bytes from fileobj src to fileobj dst.
       If length is None, copy the entire content.
    """
    if length == 0:
        return
    if length is None:
        while True:
            buf = src.read(16*1024)
            if not buf:
                break
            dst.write(buf)
        return

    BUFSIZE = 16 * 1024
    blocks, remainder = divmod(length, BUFSIZE)
    for b in range(blocks):
        buf = src.read(BUFSIZE)
        if len(buf) < BUFSIZE:
            raise IOError("end of file reached")
        dst.write(buf)

    if remainder != 0:
        buf = src.read(remainder)
        if len(buf) < remainder:
            raise IOError("end of file reached")
        dst.write(buf)
    return
Example #16
Source File: test_capture.py From py with MIT License | 5 votes |
def test_stdin_nulled_by_default(self):
    print("XXX this test may well hang instead of crashing")
    print("XXX which indicates an error in the underlying capturing")
    print("XXX mechanisms")
    cap = self.getcapture()
    py.test.raises(IOError, "sys.stdin.read()")
    out, err = cap.reset()
Example #17
Source File: programs.py From raveberry with GNU Lesser General Public License v3.0 | 5 votes |
def compute(self) -> None:
    """If active, read output from the cava program.

    Makes sure that the most recent frame is always fully available.
    Stores incomplete frames for the next update."""
    # do not compute if no program uses cava
    if self.consumers == 0:
        return

    # read the fifo until we get to the current frame
    while True:
        try:
            read = os.read(
                self.cava_fifo, self.frame_length - len(self.growing_frame)
            )
            if read == b"":
                return
            self.growing_frame += read
        except OSError as e:
            if e.errno == errno.EAGAIN or e.errno == errno.EWOULDBLOCK:
                # there were not enough bytes for a whole frame, keep the old frame
                return

        # we read a whole frame, update the factors
        if len(self.growing_frame) == self.frame_length:
            # vol = max(0.01, self.lights.base.musiq.player.volume)
            # self.current_frame = [int(b) / 255 / vol for b in self.growing_frame]
            self.current_frame = [int(b) / 255 for b in self.growing_frame]
            self.growing_frame = b""
Example #18
Source File: utils.py From restrain-jit with MIT License | 5 votes |
def exec_cc(cmd, args):
    """
    Execute with current context.
    Yes, you're right -- I'm naming it after call/cc.

    Return a generator.
    The first yielded one is the status of the execution of the subprocess
    command. The following ones are the buffer batches of stderr, each of
    which is a Python 'bytes' object.
    """
    file = cmd
    err_in, err_out = os.pipe()
    out_in, out_out = os.pipe()

    if os.fork():
        _, status = os.wait()
        os.close(err_out)
        os.close(out_out)
        yield status
        while True:
            load = os.read(err_in, 1024)
            if not load:
                break
            yield load
    else:
        # for child process
        os.close(err_in)
        os.close(out_in)
        os.dup2(err_out, sys.stderr.fileno())
        os.dup2(out_out, sys.stdout.fileno())
        os.execvpe(file, [cmd, *args], dict(os.environ))
        # in case that os.execvp fails
        sys.exit(127)
Example #19
Source File: pipeline.py From daudin with MIT License | 5 votes |
def print_(self):
    if isinstance(self.stdin, TextIOWrapper):
        s = self.stdin.read()
        print(s, end='' if s.endswith('\n') else '\n', file=self.outfp)
    elif isinstance(self.stdin, str):
        print(self.stdin,
              end='' if self.stdin.endswith('\n') else '\n',
              file=self.outfp)
    elif self.lastResultIsList:
        print('\n'.join(self.stdin), file=self.outfp)
    else:
        print(self.stdin, file=self.outfp)
Example #20
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def _handle_read(self):
    try:
        pos = self._read_to_buffer_loop()
    except UnsatisfiableReadError:
        raise
    except Exception as e:
        gen_log.warning("error on read: %s" % e)
        self.close(exc_info=True)
        return
    if pos is not None:
        self._read_from_buffer(pos)
        return
    else:
        self._maybe_run_close_callback()
Example #21
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def read_until_close(self, callback=None, streaming_callback=None):
    """Asynchronously reads all data from the socket until it is closed.

    If a ``streaming_callback`` is given, it will be called with chunks
    of data as they become available, and the final result will be empty.
    Otherwise, the result is all the data that was read.
    If a callback is given, it will be run with the data as an argument;
    if not, this method returns a `.Future`.

    Note that if a ``streaming_callback`` is used, data will be
    read from the socket as quickly as it becomes available; there
    is no way to apply backpressure or cancel the reads. If flow
    control or cancellation are desired, use a loop with
    `read_bytes(partial=True) <.read_bytes>` instead.

    .. versionchanged:: 4.0
        The callback argument is now optional and a `.Future` will
        be returned if it is omitted.
    """
    future = self._set_read_callback(callback)
    self._streaming_callback = stack_context.wrap(streaming_callback)
    if self.closed():
        if self._streaming_callback is not None:
            self._run_read_callback(self._read_buffer_size, True)
        self._run_read_callback(self._read_buffer_size, False)
        return future
    self._read_until_close = True
    try:
        self._try_inline_read()
    except:
        if future is not None:
            future.add_done_callback(lambda f: f.exception())
        raise
    return future
Example #22
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def read_bytes(self, num_bytes, callback=None, streaming_callback=None,
               partial=False):
    """Asynchronously read a number of bytes.

    If a ``streaming_callback`` is given, it will be called with chunks
    of data as they become available, and the final result will be empty.
    Otherwise, the result is all the data that was read.
    If a callback is given, it will be run with the data as an argument;
    if not, this method returns a `.Future`.

    If ``partial`` is true, the callback is run as soon as we have
    any bytes to return (but never more than ``num_bytes``)

    .. versionchanged:: 4.0
        Added the ``partial`` argument.  The callback argument is now
        optional and a `.Future` will be returned if it is omitted.
    """
    future = self._set_read_callback(callback)
    assert isinstance(num_bytes, numbers.Integral)
    self._read_bytes = num_bytes
    self._read_partial = partial
    self._streaming_callback = stack_context.wrap(streaming_callback)
    try:
        self._try_inline_read()
    except:
        if future is not None:
            future.add_done_callback(lambda f: f.exception())
        raise
    return future
Example #23
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def read_until(self, delimiter, callback=None, max_bytes=None):
    """Asynchronously read until we have found the given delimiter.

    The result includes all the data read including the delimiter.
    If a callback is given, it will be run with the data as an argument;
    if not, this method returns a `.Future`.

    If ``max_bytes`` is not None, the connection will be closed
    if more than ``max_bytes`` bytes have been read and the delimiter
    is not found.

    .. versionchanged:: 4.0
        Added the ``max_bytes`` argument.  The ``callback`` argument is
        now optional and a `.Future` will be returned if it is omitted.
    """
    future = self._set_read_callback(callback)
    self._read_delimiter = delimiter
    self._read_max_bytes = max_bytes
    try:
        self._try_inline_read()
    except UnsatisfiableReadError as e:
        # Handle this the same way as in _handle_events.
        gen_log.info("Unsatisfiable read, closing connection: %s" % e)
        self.close(exc_info=True)
        return future
    except:
        if future is not None:
            future.add_done_callback(lambda f: f.exception())
        raise
    return future
Example #24
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def read_until_regex(self, regex, callback=None, max_bytes=None):
    """Asynchronously read until we have matched the given regex.

    The result includes the data that matches the regex and anything
    that came before it.
    If a callback is given, it will be run with the data as an argument;
    if not, this method returns a `.Future`.

    If ``max_bytes`` is not None, the connection will be closed
    if more than ``max_bytes`` bytes have been read and the regex is
    not satisfied.

    .. versionchanged:: 4.0
        Added the ``max_bytes`` argument.  The ``callback`` argument is
        now optional and a `.Future` will be returned if it is omitted.
    """
    future = self._set_read_callback(callback)
    self._read_regex = re.compile(regex)
    self._read_max_bytes = max_bytes
    try:
        self._try_inline_read()
    except UnsatisfiableReadError as e:
        # Handle this the same way as in _handle_events.
        gen_log.info("Unsatisfiable read, closing connection: %s" % e)
        self.close(exc_info=True)
        return future
    except:
        if future is not None:
            # Ensure that the future doesn't log an error because its
            # failure was never examined.
            future.add_done_callback(lambda f: f.exception())
        raise
    return future
Example #25
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def read_from_fd(self):
    """Attempts to read from the underlying file.

    Returns ``None`` if there was nothing to read (the socket
    returned `~errno.EWOULDBLOCK` or equivalent), otherwise
    returns the data.  When possible, should return no more than
    ``self.read_chunk_size`` bytes at a time.
    """
    raise NotImplementedError()
Example #26
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def read_from_fd(self):
    if self._ssl_accepting:
        # If the handshake hasn't finished yet, there can't be anything
        # to read (attempting to read may or may not raise an exception
        # depending on the SSL version)
        return None
    try:
        # SSLSocket objects have both a read() and recv() method,
        # while regular sockets only have recv().
        # The recv() method blocks (at least in python 2.6) if it is
        # called when there is nothing to read, so we have to use
        # read() instead.
        chunk = self.socket.read(self.read_chunk_size)
    except ssl.SSLError as e:
        # SSLError is a subclass of socket.error, so this except
        # block must come first.
        if e.args[0] == ssl.SSL_ERROR_WANT_READ:
            return None
        else:
            raise
    except socket.error as e:
        if e.args[0] in _ERRNO_WOULDBLOCK:
            return None
        else:
            raise
    if not chunk:
        self.close()
        return None
    return chunk
Example #27
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def _maybe_add_error_listener(self):
    # This method is part of an optimization: to detect a connection that
    # is closed when we're not actively reading or writing, we must listen
    # for read events.  However, it is inefficient to do this when the
    # connection is first established because we are going to read or write
    # immediately anyway.  Instead, we insert checks at various times to
    # see if the connection is idle and add the read listener then.
    if self._pending_callbacks != 0:
        return
    if self._state is None or self._state == ioloop.IOLoop.ERROR:
        if self.closed():
            self._maybe_run_close_callback()
        elif (self._read_buffer_size == 0 and
              self._close_callback is not None):
            self._add_io_state(ioloop.IOLoop.READ)
Example #28
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def _read_from_buffer(self, pos):
    """Attempts to complete the currently-pending read from the buffer.

    The argument is either a position in the read buffer or None,
    as returned by _find_read_pos.
    """
    self._read_bytes = self._read_delimiter = self._read_regex = None
    self._read_partial = False
    self._run_read_callback(pos, False)
Example #29
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def _try_inline_read(self):
    """Attempt to complete the current read operation from buffered data.

    If the read can be completed without blocking, schedules the
    read callback on the next IOLoop iteration; otherwise starts
    listening for reads on the socket.
    """
    # See if we've already got the data from a previous read
    self._run_streaming_callback()
    pos = self._find_read_pos()
    if pos is not None:
        self._read_from_buffer(pos)
        return
    self._check_closed()
    try:
        pos = self._read_to_buffer_loop()
    except Exception:
        # If there was an exception in _read_to_buffer, we called close()
        # already, but couldn't run the close callback because of
        # _pending_callbacks.  Before we escape from this function, run the
        # close callback if applicable.
        self._maybe_run_close_callback()
        raise
    if pos is not None:
        self._read_from_buffer(pos)
        return
    # We couldn't satisfy the read inline, so either close the stream
    # or listen for new data.
    if self.closed():
        self._maybe_run_close_callback()
    else:
        self._add_io_state(ioloop.IOLoop.READ)
Example #30
Source File: iostream.py From tornado-zh with MIT License | 5 votes |
def _maybe_run_close_callback(self):
    # If there are pending callbacks, don't run the close callback
    # until they're done (see _maybe_add_error_handler)
    if self.closed() and self._pending_callbacks == 0:
        futures = []
        if self._read_future is not None:
            futures.append(self._read_future)
            self._read_future = None
        if self._write_future is not None:
            futures.append(self._write_future)
            self._write_future = None
        if self._connect_future is not None:
            futures.append(self._connect_future)
            self._connect_future = None
        if self._ssl_connect_future is not None:
            futures.append(self._ssl_connect_future)
            self._ssl_connect_future = None
        for future in futures:
            future.set_exception(StreamClosedError(real_error=self.error))
        if self._close_callback is not None:
            cb = self._close_callback
            self._close_callback = None
            self._run_callback(cb)
        # Delete any unfinished callbacks to break up reference cycles.
        self._read_callback = self._write_callback = None
        # Clear the buffers so they can be cleared immediately even
        # if the IOStream object is kept alive by a reference cycle.
        # TODO: Clear the read buffer too; it currently breaks some tests.
        self._write_buffer = None