Python io.SEEK_END Examples
The following are 30 code examples of io.SEEK_END. (Note that io.SEEK_END is a constant passed to seek(), not a callable.)
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the io module, or try the search function.
Example #1
Source File: segments.py From PyHDB with Apache License 2.0 | 6 votes |
def pack(self, payload, **kwargs):
    """Serialize this segment (header + parts) into *payload*.

    payload: a seekable binary buffer (BytesIO-like); the segment is
    appended starting at the buffer's current position.
    kwargs: may contain 'commit' (truthy -> 1) forwarded into the header.
    """
    # remember position in payload object:
    segment_payload_start_pos = payload.tell()
    # Advance num bytes equal to header size. The header is written later
    # after the payload of all segments and parts has been written:
    payload.seek(self.header_size, io.SEEK_CUR)
    # Generate payload of parts:
    self.build_payload(payload)
    segment_length = payload.tell() - segment_payload_start_pos  # calc length of parts payload
    self.header = RequestSegmentHeader(segment_length, self.offset, len(self.parts),
                                       self.number, self.segment_kind, self.message_type,
                                       int(kwargs.get('commit', 0)), self.command_options)
    packed_header = self.header_struct.pack(*self.header)
    # Go back to beginning of payload header for writing segment header:
    payload.seek(segment_payload_start_pos)
    payload.write(packed_header)
    # Put file pointer at the end of the buffer so that next segment can be appended:
    payload.seek(0, io.SEEK_END)
Example #2
Source File: qtutils.py From qutebrowser with GNU General Public License v3.0 | 6 votes |
def seek(self, offset: int, whence: int = io.SEEK_SET) -> int:
    """Seek the underlying QIODevice, emulating io-style whence semantics.

    QIODevice.seek only takes absolute positions, so SEEK_CUR/SEEK_END
    are translated into absolute offsets here.

    Raises io.UnsupportedOperation for an unknown *whence* and QtOSError
    when the device refuses the seek. Returns the new absolute position.
    """
    self._check_open()
    self._check_random()
    if whence == io.SEEK_SET:
        ok = self.dev.seek(offset)
    elif whence == io.SEEK_CUR:
        ok = self.dev.seek(self.tell() + offset)
    elif whence == io.SEEK_END:
        # len(self) is assumed to be the device size — TODO confirm.
        ok = self.dev.seek(len(self) + offset)
    else:
        raise io.UnsupportedOperation("whence = {} is not "
                                      "supported!".format(whence))
    if not ok:
        raise QtOSError(self.dev, msg="seek failed!")
    return self.dev.pos()
Example #3
Source File: file_slice.py From onedrive-sdk-python with MIT License | 6 votes |
def seek(self, offset, whence=io.SEEK_SET):
    """Seek within the file slice.

    *offset* is interpreted relative to *whence* (io.SEEK_SET seeks
    relative to the slice start, io.SEEK_CUR to the current handle
    position, io.SEEK_END to the slice end).

    Raises ValueError when the target position falls outside the slice,
    or when *whence* is not one of the three io constants (previously an
    unknown whence crashed with UnboundLocalError).

    Returns the new position relative to the start of the slice.
    """
    if whence == io.SEEK_SET:
        desired_pos = self._start + offset
    elif whence == io.SEEK_CUR:
        desired_pos = self._handle.tell() + offset
    elif whence == io.SEEK_END:
        desired_pos = self._end + offset
    else:
        raise ValueError("Invalid whence: {!r}".format(whence))
    if desired_pos < self._start:
        raise ValueError("Seeking before the file slice")
    if desired_pos > self._end:
        # fixed typo in the error message ("Seekeing")
        raise ValueError("Seeking past the end of file slice")
    ret = self._handle.seek(desired_pos, io.SEEK_SET)
    if ret:
        # Report position relative to the slice start.
        return ret - self._start
    else:
        return ret
Example #4
Source File: test_file_slice.py From onedrive-sdk-python with MIT License | 6 votes |
def testSanityChecks(self):
    """FileSlice must reject negative/inverted bounds and out-of-slice seeks."""
    with tempfile.TemporaryFile() as f:
        f.write(b'123456789')
        f.flush()
        # Invalid constructor arguments must raise ValueError.
        with self.assertRaises(ValueError):
            part = FileSlice(f, -5, -2)
        with self.assertRaises(ValueError):
            part = FileSlice(f, 0, -2)
        with self.assertRaises(ValueError):
            part = FileSlice(f, -10, 2)
        with self.assertRaises(ValueError):
            part = FileSlice(f, 10, 2)
        with self.assertRaises(ValueError):
            part = FileSlice(f, 10, length=-2)
        # Valid slice [1, 5): seeks outside it must raise ValueError.
        part = FileSlice(f, 1, 5)
        with self.assertRaises(ValueError):
            part.seek(8)
        with self.assertRaises(ValueError):
            part.seek(8, io.SEEK_SET)
        part.seek(3)
        with self.assertRaises(ValueError):
            part.seek(4, io.SEEK_CUR)
        with self.assertRaises(ValueError):
            part.seek(-5, io.SEEK_END)
Example #5
Source File: test_function.py From compoundfiles with MIT License | 6 votes |
def test_stream_read():
    """Read streams from a compound file, including EOF behavior of read1()."""
    with cf.CompoundFileReader('tests/example2.dat') as doc:
        # Same file as example.dat with an additional Stream 2 which is 4112
        # bytes long (too long for mini FAT)
        with doc.open('Storage 1/Stream 1') as f:
            assert len(f.read()) == 544
            f.seek(0)
            # Reading more than the stream holds returns only what exists.
            assert len(f.read(1024)) == 544
            f.seek(0)
            assert len(f.read1()) == 64
            # At EOF, read1() returns an empty bytes object.
            f.seek(0, io.SEEK_END)
            assert f.read1() == b''
        with doc.open('Storage 1/Stream 2') as f:
            assert len(f.read()) == 4112
            f.seek(0)
            assert len(f.read1()) == 512
            f.seek(0, io.SEEK_END)
            assert f.read1() == b''
Example #6
Source File: memory.py From bplustree with MIT License | 6 votes |
def __init__(self, filename: str, page_size: int):
    """Open (or create) the write-ahead-log file backing a B+Tree.

    A pre-existing, non-empty WAL file means the tree was not closed
    cleanly, so recovery is flagged for the caller to perform.
    """
    self.filename = filename + '-wal'
    self._fd, self._dir_fd = open_file_in_dir(self.filename)
    self._page_size = page_size
    self._committed_pages = dict()
    self._not_committed_pages = dict()
    # An empty file is a fresh WAL; anything else is a leftover from a crash.
    self._fd.seek(0, io.SEEK_END)
    if self._fd.tell() == 0:
        self._create_header()
        self.needs_recovery = False
    else:
        logger.warning('Found an existing WAL file, '
                       'the B+Tree was not closed properly')
        self.needs_recovery = True
        self._load_wal()
Example #7
Source File: test_serialization_fileview.py From dimod with Apache License 2.0 | 6 votes |
def test_readinto_partial_back_to_front(self, name, BQM, version):
    """readinto() after SEEK_END must match the matching tail of readall()."""
    bqm = BQM(np.triu(np.arange(25).reshape((5, 5))), 'BINARY')
    bqm.offset = 14
    fv = FileView(bqm, version=version)
    buff = fv.readall()
    # For every suffix length, seek back from the end and re-read it.
    for pos in range(1, fv.quadratic_end):
        fv.seek(-pos, io.SEEK_END)
        subbuff = bytearray(pos)  # length pos
        self.assertEqual(fv.readinto(subbuff), len(subbuff))
        self.assertEqual(buff[-pos:], subbuff)
Example #8
Source File: cfb.py From pyaaf2 with MIT License | 6 votes |
def seek(self, offset, whence=io.SEEK_SET):
    """Seek within the CFB stream; seeking past EOF grows it with zeros.

    Returns the new absolute position. Raises ValueError when the target
    is negative.
    """
    if whence == io.SEEK_CUR:
        offset = self.tell() + offset
    elif whence == io.SEEK_END:
        offset = self.dir.byte_size + offset
    # NOTE(review): an unknown whence falls through and treats offset as
    # absolute (SEEK_SET semantics) — confirm this is intentional.
    if offset < 0:
        raise ValueError('New position is before the start of the stream')
    if offset > self.dir.byte_size:
        # Overseek: pad the stream with zeros up to the requested offset,
        # writing at most sector_size * 4 bytes per chunk.
        # logging.debug("overseek %d bytes, padding with zeros" % (offset - self.dir.byte_size))
        self.pos = self.dir.byte_size
        bytes_left = offset - self.dir.byte_size
        min_seek_size = self.storage.sector_size * 4
        while bytes_left:
            # byte_size presumably grows as write() runs, so recompute
            # the remaining gap each iteration — TODO confirm.
            bytes_to_write = min(min_seek_size, offset - self.dir.byte_size)
            zeros = bytearray(bytes_to_write)
            self.write(zeros)
            bytes_left -= bytes_to_write
    self.pos = offset
    return offset
Example #9
Source File: test_file_slice.py From onedrive-sdk-python with MIT License | 6 votes |
def testSliceFileStartEnd(self):
    """Reads, relative seeks, and write rejection on a [0, 5) file slice."""
    with tempfile.TemporaryFile() as f:
        f.write(b'123456789')
        f.flush()
        part = FileSlice(f, 0, 5)
        self.assertEqual(len(part), 5)
        self.assertEqual(part.read(), b'12345')
        # Reading at the slice end yields empty bytes.
        self.assertEqual(part.read(3), b'')
        part.seek(0, io.SEEK_SET)
        self.assertEqual(part.read(3), b'123')
        self.assertEqual(part.tell(), 3)
        part.seek(-3, io.SEEK_CUR)
        self.assertEqual(part.tell(), 0)
        part.seek(-2, io.SEEK_END)
        self.assertEqual(part.tell(), 3)
        self.assertEqual(part.readall(), b'45')
        # The slice is read-only: writes must raise IOError.
        with self.assertRaises(IOError):
            part.write('abc')
        with self.assertRaises(IOError):
            part.writelines(['foo', 'bar'])
Example #10
Source File: descriptors.py From azure-kusto-python with MIT License | 6 votes |
def _detect_size(self):
    """Estimate the uncompressed size of self.path and store it in self._size.

    .gz  -> read the trailing 4-byte ISIZE field; fall back to a
            ratio-based estimate when that value looks implausible.
            NOTE(review): ISIZE is the size modulo 2**32, and disk_size
            here is actually (file size - 4) — confirm the heuristic
            thresholds account for both.
    .zip -> sum the uncompressed sizes of all archive members.
    else -> plain on-disk file size.
    """
    uncompressed_size = 0
    if self.path.endswith(".gz"):
        # This logic follow after the C# implementation
        # See IngstionHelpers.cs for an explanation as to what stands behind it
        with open(self.path, "rb") as f:
            disk_size = f.seek(-4, SEEK_END)
            uncompressed_size = struct.unpack("I", f.read(4))[0]
            if (disk_size >= uncompressed_size) or (disk_size >= self.GZIP_MAX_DISK_SIZE_FOR_DETECTION):
                uncompressed_size = disk_size * self.DEFAULT_COMPRESSION_RATIO
    elif self.path.endswith(".zip"):
        with ZipFile(self.path) as zip_archive:
            for f in zip_archive.infolist():
                uncompressed_size += f.file_size
    else:
        uncompressed_size = os.path.getsize(self.path)
    self._size = uncompressed_size
Example #11
Source File: EpsImagePlugin.py From teleport with Apache License 2.0 | 6 votes |
def _find_offset(self, fp):
    """Locate the PostScript payload in *fp*.

    Returns a (length, offset) tuple describing where the PS section
    lives in the file. Raises SyntaxError when *fp* is not an EPS file.
    """
    header = fp.read(160)
    if header[:4] == b"%!PS":
        # for HEAD without binary preview
        fp.seek(0, io.SEEK_END)
        return (fp.tell(), 0)
    if i32(header[0:4]) == 0xC6D3D0C5:
        # FIX for: Some EPS file not handled correctly / issue #302
        # EPS can contain binary data
        # or start directly with latin coding
        # more info see:
        # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
        return (i32(header[8:12]), i32(header[4:8]))
    raise SyntaxError("not an EPS file")
Example #12
Source File: mkdz.py From kdztools with GNU General Public License v3.0 | 6 votes |
def write(self, file, name):
    """Write our block to the file with the specified name.

    Copies the contents of self.name (our backing file) into the
    already-open destination *file* in 4 KiB chunks. *name* is only used
    for the progress message.
    """
    # Context manager guarantees the source is closed even on error
    # (the original leaked the descriptor if read/write raised).
    with io.FileIO(self.name, "rb") as src:
        size = src.seek(0, io.SEEK_END)
        src.seek(0, io.SEEK_SET)
        print("[+] Writing {:s} to {:s} ({:d} bytes)".format(self.name, name, size))
        while True:
            buf = src.read(4096)
            if not buf:
                break
            file.write(buf)
Example #13
Source File: memory.py From bplustree with MIT License | 6 votes |
def _add_frame(self, frame_type: FrameType,
               page: Optional[int] = None,
               page_data: Optional[bytes] = None):
    """Append one frame (type + page reference + optional page data) to the WAL.

    PAGE frames must carry both a page number and page-sized data; other
    frame types carry an empty payload and are fsynced immediately.
    """
    if frame_type is FrameType.PAGE and (not page or not page_data):
        raise ValueError('PAGE frame without page data')
    if page_data and len(page_data) != self._page_size:
        raise ValueError('Page data is different from page size')
    if not page:
        page = 0
    if frame_type is not FrameType.PAGE:
        page_data = b''
    data = (
        frame_type.value.to_bytes(FRAME_TYPE_BYTES, ENDIAN) +
        page.to_bytes(PAGE_REFERENCE_BYTES, ENDIAN) +
        page_data
    )
    # Frames are always appended at the end of the WAL file.
    self._fd.seek(0, io.SEEK_END)
    # Non-PAGE frames (commits etc.) are fsynced for durability.
    write_to_file(self._fd, self._dir_fd, data,
                  fsync=frame_type != FrameType.PAGE)
    # NOTE(review): indexes the frame at (end - page_size), which looks
    # correct only for PAGE frames whose payload is exactly one page;
    # confirm non-PAGE frames are never looked up by this offset.
    self._index_frame(frame_type, page, self._fd.tell() - self._page_size)
Example #14
Source File: artifact.py From integration with Apache License 2.0 | 6 votes |
def _add_payloads(self):
    """Add all stored payloads to the artifact.

    Each payload file is first wrapped in its own gzip-compressed tar,
    and that inner tar is then added to the outer artifact tar under
    '<dirname>.tar.gz'. Payloads are processed in sorted-filename order.
    """
    filenames = sorted(list(self._payloads.keys()))
    for filename in filenames:
        fd = self._payloads[filename]
        # seek(0, SEEK_END) returns the size; rewind before tar-ing.
        size = fd.seek(0, io.SEEK_END)
        fd.seek(0)
        payload_tarbin = io.BytesIO()
        payload_tar = tarfile.open(fileobj=payload_tarbin, mode="w:gz")
        tarhdr = tarfile.TarInfo(os.path.basename(filename))
        tarhdr.size = size
        payload_tar.addfile(tarhdr, fd)
        payload_tar.close()
        # Outer tar entry: the compressed inner tar, sized by its buffer.
        tarhdr = tarfile.TarInfo(os.path.dirname(filename) + ".tar.gz")
        tarhdr.size = payload_tarbin.tell()
        # NOTE(review): fd was consumed by addfile above — confirm
        # _compute_checksum rewinds or tolerates the advanced position.
        self._compute_checksum(filename, fd)
        payload_tarbin.seek(0)
        self._tarfact.addfile(tarhdr, payload_tarbin)
Example #15
Source File: prepare_blackvue_videos.py From mapillary_tools with BSD 2-Clause "Simplified" License | 6 votes |
def find_camera_model(videos_folder):
    """Extract the camera model string from the first BlackVue video found.

    Walks the MP4 boxes of the first video in *videos_folder* and returns
    bytes sliced out of the first 'free' box (presumably where BlackVue
    stores the model name — TODO confirm the 29:39 slice against samples).
    Exits the process on parse errors.
    """
    from mapillary_tools.uploader import get_video_file_list
    file_list = get_video_file_list(videos_folder)
    # NOTE(review): fd is never closed and only file_list[0] is examined.
    fd = open(file_list[0], 'rb')
    fd.seek(0, io.SEEK_END)
    eof = fd.tell()
    fd.seek(0)
    while fd.tell() < eof:
        try:
            box = Box.parse_stream(fd)
        except RangeError:
            print('error parsing blackvue GPS information, exiting')
            sys.exit(1)
        except ConstError:
            print('error parsing blackvue GPS information, exiting')
            sys.exit(1)
        if box.type.decode('utf-8') == 'free':  # or 'ftyp':
            return box.data[29:39]
Example #16
Source File: EpsImagePlugin.py From teleport with Apache License 2.0 | 6 votes |
def _find_offset(self, fp):
    """Return (length, offset) of the PostScript section inside *fp*.

    Plain '%!PS' files span the whole file; files with the EPS binary
    header (magic 0xC6D3D0C5) carry explicit offset/length fields.
    Raises SyntaxError for anything else.
    """
    s = fp.read(160)
    if s[:4] == b"%!PS":
        # for HEAD without binary preview
        fp.seek(0, io.SEEK_END)
        length = fp.tell()
        offset = 0
    elif i32(s[0:4]) == 0xC6D3D0C5:
        # FIX for: Some EPS file not handled correctly / issue #302
        # EPS can contain binary data
        # or start directly with latin coding
        # more info see:
        # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
        offset = i32(s[4:8])
        length = i32(s[8:12])
    else:
        raise SyntaxError("not an EPS file")
    return (length, offset)
Example #17
Source File: _compression.py From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0 | 6 votes |
def __init__(self, fp, decomp_factory, trailing_error=(), **decomp_args):
    """Wrap raw stream *fp* with a lazily-created decompressor.

    decomp_factory/decomp_args build a fresh decompressor per compressed
    stream (and per backwards seek); trailing_error lists exception types
    signifying ignorable trailing garbage.
    """
    self._fp = fp
    self._eof = False
    self._pos = 0  # Current offset in decompressed stream
    # Set to size of decompressed stream once it is known, for SEEK_END
    self._size = -1
    # Save the decompressor factory and arguments.
    # If the file contains multiple compressed streams, each
    # stream will need a separate decompressor object. A new decompressor
    # object is also needed when implementing a backwards seek().
    self._decomp_factory = decomp_factory
    self._decomp_args = decomp_args
    self._decompressor = self._decomp_factory(**self._decomp_args)
    # Exception class to catch from decompressor signifying invalid
    # trailing data to ignore
    self._trailing_error = trailing_error
Example #18
Source File: ratarmount.py From ratarmount with MIT License | 6 votes |
def seek(self, offset, whence=io.SEEK_SET):
    """Seek within the concatenated (stenciled) view of the backing file.

    Translates *offset*/*whence* into an absolute position in the joined
    view, then positions the real file object inside the stencil that
    contains it. Seeking at or past EOF only records the position.

    Returns the new absolute offset. Raises ValueError for an unknown
    *whence* (previously it was silently ignored, leaving the position
    unchanged) and Exception when seeking before the start.
    """
    if whence == io.SEEK_CUR:
        self.offset += offset
    elif whence == io.SEEK_END:
        self.offset = self.cumsizes[-1] + offset
    elif whence == io.SEEK_SET:
        self.offset = offset
    else:
        raise ValueError("Invalid whence: {!r}".format(whence))
    if self.offset < 0:
        raise Exception("Trying to seek before the start of the file!")
    if self.offset >= self.cumsizes[-1]:
        # At or past EOF: nothing to position in the underlying file.
        return self.offset
    # Map the logical offset into the stencil that contains it.
    i = self._findStencil(self.offset)
    offsetInsideStencil = self.offset - self.cumsizes[i]
    assert offsetInsideStencil >= 0
    assert offsetInsideStencil < self.sizes[i]
    self.fileobj.seek(self.offsets[i] + offsetInsideStencil, io.SEEK_SET)
    return self.offset
Example #19
Source File: EpsImagePlugin.py From python3_ios with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _find_offset(self, fp):
    """Return (length, offset) of the PostScript section inside *fp*.

    Plain '%!PS' files span the whole file; files with the EPS binary
    header (magic 0xC6D3D0C5) carry explicit offset/length fields.
    Raises SyntaxError for anything else.
    """
    s = fp.read(160)
    if s[:4] == b"%!PS":
        # for HEAD without binary preview
        fp.seek(0, io.SEEK_END)
        length = fp.tell()
        offset = 0
    elif i32(s[0:4]) == 0xC6D3D0C5:
        # FIX for: Some EPS file not handled correctly / issue #302
        # EPS can contain binary data
        # or start directly with latin coding
        # more info see:
        # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
        offset = i32(s[4:8])
        length = i32(s[8:12])
    else:
        raise SyntaxError("not an EPS file")
    return (length, offset)
Example #20
Source File: _compression.py From Fluid-Designer with GNU General Public License v3.0 | 6 votes |
def __init__(self, fp, decomp_factory, trailing_error=(), **decomp_args):
    """Wrap raw stream *fp* with a lazily-created decompressor.

    decomp_factory/decomp_args build a fresh decompressor per compressed
    stream (and per backwards seek); trailing_error lists exception types
    signifying ignorable trailing garbage.
    """
    self._fp = fp
    self._eof = False
    self._pos = 0  # Current offset in decompressed stream
    # Set to size of decompressed stream once it is known, for SEEK_END
    self._size = -1
    # Save the decompressor factory and arguments.
    # If the file contains multiple compressed streams, each
    # stream will need a separate decompressor object. A new decompressor
    # object is also needed when implementing a backwards seek().
    self._decomp_factory = decomp_factory
    self._decomp_args = decomp_args
    self._decompressor = self._decomp_factory(**self._decomp_args)
    # Exception class to catch from decompressor signifying invalid
    # trailing data to ignore
    self._trailing_error = trailing_error
Example #21
Source File: message.py From PyHDB with Apache License 2.0 | 6 votes |
def pack(self):
    """Pack message to binary stream.

    Serializes all segments into a BytesIO buffer, then writes the
    message header in front of them. Returns the buffer positioned at
    its end so more data could be appended.
    """
    payload = io.BytesIO()
    # Advance num bytes equal to header size - the header is written later
    # after the payload of all segments and parts has been written:
    payload.seek(self.header_size, io.SEEK_CUR)
    # Write out payload of segments and parts:
    self.build_payload(payload)
    packet_length = len(payload.getvalue()) - self.header_size
    self.header = MessageHeader(self.session_id, self.packet_count, packet_length,
                                constants.MAX_SEGMENT_SIZE,
                                num_segments=len(self.segments), packet_options=0)
    packed_header = self.header_struct.pack(*self.header)
    # Go back to beginning of payload for writing message header:
    payload.seek(0)
    payload.write(packed_header)
    payload.seek(0, io.SEEK_END)
    trace(self)
    return payload
Example #22
Source File: VirtualFile.py From VideoSuperResolution with MIT License | 6 votes |
def seek(self, offset, where=SEEK_SET):
    """Move the read pointer by *offset* relative to *where*.

    Args:
      offset: number of entries to move the read pointer by.
      where: io.SEEK_SET, io.SEEK_CUR or io.SEEK_END.

    The already-consumed prefix lives in ``self.read_file`` and the
    remainder in ``self.file``; seeking re-partitions the two lists and
    invalidates the cached descriptor.
    """
    consumed = len(self.read_file)
    if where == SEEK_CUR:
        target = consumed + offset
    elif where == SEEK_END:
        target = consumed + len(self.file) + offset
    else:
        target = offset
    # Clamp to the start; seeking before the beginning is a no-op to 0.
    target = max(target, 0)
    everything = self.read_file + self.file
    self.read_file = everything[:target]
    self.file = everything[target:]
    self.cur_fd = None
Example #23
Source File: VirtualFile.py From VideoSuperResolution with MIT License | 6 votes |
def read_frame2(self, frames=1, *args):
    """new API, saving memory while loading frames.
    But will consume a lot of file descriptors.

    Args:
        frames: number of frames to be loaded

    Returns the list of PIL Images; raises EOFError when fewer than
    *frames* files remain and rewinding is disabled.
    """
    imgs = []
    if frames == 0:
        return imgs
    while True:
        if len(self.file) > 0:
            cur_fd = self.file.pop(0)
            # Image.open is lazy, so the descriptor stays in use;
            # that is why this API "consumes a lot of file descriptors".
            imgs.append(Image.open(cur_fd))
            self.read_file.append(cur_fd)
            # Re-open just to measure the file size for read_pointer.
            with open(cur_fd, 'rb') as fd:
                fd.seek(0, SEEK_END)
                self.read_pointer += fd.tell()
        elif self.rewind:
            # presumably reopen() refills self.file — otherwise this
            # loops forever; TODO confirm.
            self.reopen()
        else:
            raise EOFError('End of File!')
        if len(imgs) == frames:
            break
    return imgs
Example #24
Source File: lobs.py From PyHDB with Apache License 2.0 | 6 votes |
def seek(self, offset, whence=SEEK_SET): """Seek pointer in lob data buffer to requested position. Might trigger further loading of data from the database if the pointer is beyond currently read data. """ # A nice trick is to (ab)use BytesIO.seek() to go to the desired position for easier calculation. # This will not add any data to the buffer however - very convenient! self.data.seek(offset, whence) new_pos = self.data.tell() missing_bytes_to_read = new_pos - self._current_lob_length if missing_bytes_to_read > 0: # Trying to seek beyond currently available LOB data, so need to load some more first. # We are smart here: (at least trying...): # If a user sets a certain file position s/he probably wants to read data from # there. So already read some extra data to avoid yet another immediate # reading step. Try with EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK additional items (bytes/chars). # jump to the end of the current buffer and read the new data: self.data.seek(0, SEEK_END) self.read(missing_bytes_to_read + self.EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK) # reposition file pointer a originally desired position: self.data.seek(new_pos) return new_pos
Example #25
Source File: _compression.py From Imogen with MIT License | 6 votes |
def __init__(self, fp, decomp_factory, trailing_error=(), **decomp_args):
    """Wrap raw stream *fp* with a lazily-created decompressor.

    decomp_factory/decomp_args build a fresh decompressor per compressed
    stream (and per backwards seek); trailing_error lists exception types
    signifying ignorable trailing garbage.
    """
    self._fp = fp
    self._eof = False
    self._pos = 0  # Current offset in decompressed stream
    # Set to size of decompressed stream once it is known, for SEEK_END
    self._size = -1
    # Save the decompressor factory and arguments.
    # If the file contains multiple compressed streams, each
    # stream will need a separate decompressor object. A new decompressor
    # object is also needed when implementing a backwards seek().
    self._decomp_factory = decomp_factory
    self._decomp_args = decomp_args
    self._decompressor = self._decomp_factory(**self._decomp_args)
    # Exception class to catch from decompressor signifying invalid
    # trailing data to ignore
    self._trailing_error = trailing_error
Example #26
Source File: slob.py From slob with GNU General Public License v3.0 | 6 votes |
def test_seek_and_read(self):
    """MultiFileReader must behave like one contiguous file across parts."""
    def mkfile(basename, content):
        # Helper: write *content* into tmpdir/basename and return its path.
        part = os.path.join(self.tmpdir.name, basename)
        with fopen(part, 'wb') as f:
            f.write(content)
        return part
    content = b'abc\nd\nefgh\nij'
    # Split the content across three files at arbitrary boundaries.
    part1 = mkfile('1', content[:4])
    part2 = mkfile('2', content[4:5])
    part3 = mkfile('3', content[5:])
    with MultiFileReader(part1, part2, part3) as m:
        self.assertEqual(m.size, len(content))
        m.seek(2)
        self.assertEqual(m.read(2), content[2:4])
        m.seek(1)
        # Reads spanning part boundaries must be seamless.
        self.assertEqual(m.read(len(content) - 2), content[1:-1])
        m.seek(-1, whence=io.SEEK_END)
        self.assertEqual(m.read(10), content[-1:])
        m.seek(4)
        m.seek(-2, whence=io.SEEK_CUR)
        self.assertEqual(m.read(3), content[2:5])
Example #27
Source File: memory.py From bplustree with MIT License | 6 votes |
def __init__(self, filename: str, tree_conf: TreeConf, cache_size: int = 512):
    """Open the file-backed page store for a B+Tree.

    cache_size == 0 disables page caching entirely; otherwise an LRU
    cache of that many pages is used. A dirty WAL triggers a checkpoint
    before the store is used.
    """
    self._filename = filename
    self._tree_conf = tree_conf
    self._lock = rwlock.RWLock()
    if cache_size == 0:
        self._cache = FakeCache()
    else:
        self._cache = cachetools.LRUCache(maxsize=cache_size)
    self._fd, self._dir_fd = open_file_in_dir(filename)
    self._wal = WAL(filename, tree_conf.page_size)
    if self._wal.needs_recovery:
        self.perform_checkpoint(reopen_wal=True)
    # Get the next available page
    self._fd.seek(0, io.SEEK_END)
    last_byte = self._fd.tell()
    self.last_page = int(last_byte / self._tree_conf.page_size)
    self._freelist_start_page = 0
    # Todo: Remove this, it should only be in Tree
    self._root_node_page = 0
Example #28
Source File: lobs.py From PyHDB with Apache License 2.0 | 5 votes |
def _read_missing_lob_data_from_db(self, readoffset, readlength):
    """Read LOB request part from database.

    Fetches *readlength* items starting at *readoffset*, decodes them,
    appends them to the local buffer and updates the cached length.
    """
    logger.debug('Reading missing lob data from db. Offset: %d, readlength: %d' %
                 (readoffset, readlength))
    lob_data = self._make_read_lob_request(readoffset, readlength)
    # make sure we really got as many items (not bytes!) as requested:
    enc_lob_data = self._decode_lob_data(lob_data)
    assert readlength == len(enc_lob_data), 'expected: %d, received; %d' % (readlength, len(enc_lob_data))
    # jump to end of data, and append new and properly decoded data to it:
    self.data.seek(0, SEEK_END)
    self.data.write(enc_lob_data)
    self._current_lob_length = len(self.data.getvalue())
Example #29
Source File: streams.py From carpe with Apache License 2.0 | 5 votes |
def seek(self, offset, whence=io.SEEK_SET):
    """
    Change the stream position to the given byte *offset*.

    *offset* is interpreted relative to the position indicated by
    *whence*:

    * ``SEEK_SET`` or ``0`` - start of the stream (the default);
      *offset* should be zero or positive
    * ``SEEK_CUR`` or ``1`` - current stream position; *offset* may be
      negative
    * ``SEEK_END`` or ``2`` - end of the stream; *offset* is usually
      negative

    Return the new absolute position.
    """
    if whence == io.SEEK_CUR:
        target = self.tell() + offset
    elif whence == io.SEEK_END:
        target = self._length + offset
    else:
        target = offset
    if target < 0:
        raise ValueError(
            'New position is before the start of the stream')
    self._set_pos(target)
    return target
Example #30
Source File: utils.py From pysradb with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_gzip_uncompressed_size(filepath):
    """Get uncompressed size of a .gz file.

    Parameters
    ----------
    filepath: string
              Path to input file

    Returns
    -------
    filesize: int
              Uncompressed file size
    """
    # Seeking to the end of an open GzipFile decompresses the stream and
    # reports the total number of decompressed bytes.
    with gzip.open(filepath, "rb") as gz_handle:
        end_offset = gz_handle.seek(0, io.SEEK_END)
    return end_offset