Python tarfile.BLOCKSIZE Examples
The following are 8 code examples of tarfile.BLOCKSIZE, the 512-byte tar block size defined by the tarfile module. Each example notes its original project and source file. You may also want to check out the other available functions and classes of the tarfile module.
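tarfile.BLOCKSIZE is the size of a single tar block, 512 bytes; member data in a tar archive is padded with NUL bytes up to a multiple of it. A minimal sketch of the padding arithmetic that most of the examples below repeat:

import tarfile

payload = b"hello world"
blocks, remainder = divmod(len(payload), tarfile.BLOCKSIZE)
if remainder:
    blocks += 1                      # a partial block still occupies a full block
padded_size = blocks * tarfile.BLOCKSIZE

print(tarfile.BLOCKSIZE)             # 512
print(padded_size)                   # 512 -- an 11-byte payload fills one block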
Example #1
Source File: tarwriter.py From qubes-core-admin with GNU Lesser General Public License v2.1
def format_sparse_map(self):
    sparsemap_txt = (str(len(self.sparsemap)) + '\n' +
        ''.join('{}\n{}\n'.format(*entry)
            for entry in self.sparsemap))
    sparsemap_txt_len = len(sparsemap_txt)
    if sparsemap_txt_len % tarfile.BLOCKSIZE:
        padding = '\0' * (tarfile.BLOCKSIZE -
                          sparsemap_txt_len % tarfile.BLOCKSIZE)
    else:
        padding = ''
    return (sparsemap_txt + padding).encode()
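The same padding rule can be written as a small standalone helper; pad_to_block below is a hypothetical name used only to illustrate the arithmetic, it is not part of qubes-core-admin:

import tarfile

def pad_to_block(data):
    # Pad data with NUL bytes up to the next tarfile.BLOCKSIZE boundary.
    remainder = len(data) % tarfile.BLOCKSIZE
    if remainder:
        data += b"\0" * (tarfile.BLOCKSIZE - remainder)
    return data

assert len(pad_to_block(b"3\n0\n512\n")) == tarfile.BLOCKSIZE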
Example #2
Source File: tarwriter.py From qubes-core-admin with GNU Lesser General Public License v2.1
def get_sparse_map(input_file):
    '''
    Return map of the file where actual data is present, ignoring zero-ed
    blocks. Last entry of the map spans to the end of file, even if that part
    is zero-size (when file ends with zeros).

    This function is performance critical.

    :param input_file: io.File object
    :return: iterable of (offset, size)
    '''
    zero_block = bytearray(tarfile.BLOCKSIZE)
    buf = bytearray(BUF_SIZE)
    in_data_block = False
    data_block_start = 0
    buf_start_offset = 0
    while True:
        buf_len = input_file.readinto(buf)
        if not buf_len:
            break
        for offset in range(0, buf_len, tarfile.BLOCKSIZE):
            if buf[offset:offset+tarfile.BLOCKSIZE] == zero_block:
                if in_data_block:
                    in_data_block = False
                    yield (data_block_start,
                        buf_start_offset+offset-data_block_start)
            else:
                if not in_data_block:
                    in_data_block = True
                    data_block_start = buf_start_offset+offset
        buf_start_offset += buf_len
    if in_data_block:
        yield (data_block_start, buf_start_offset-data_block_start)
    else:
        # always emit last slice to the input end - otherwise extracted file
        # will be truncated
        yield (buf_start_offset, 0)
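A rough usage sketch, assuming get_sparse_map above is in scope together with a module-level BUF_SIZE constant (the value below is an assumption; it only needs to be a multiple of tarfile.BLOCKSIZE):

import io
import tarfile

BUF_SIZE = 128 * tarfile.BLOCKSIZE   # assumed read-buffer size

# One data block, one zero block, one more data block.
image = (b"A" * tarfile.BLOCKSIZE +
         b"\0" * tarfile.BLOCKSIZE +
         b"B" * tarfile.BLOCKSIZE)
print(list(get_sparse_map(io.BytesIO(image))))
# [(0, 512), (1024, 512)]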
Example #3
Source File: test_tarfile.py From Fluid-Designer with GNU General Public License v3.0
def test_eof_marker(self):
    # Make sure an end of archive marker is written (two zero blocks).
    # tarfile insists on aligning archives to a 20 * 512 byte recordsize.
    # So, we create an archive that has exactly 10240 bytes without the
    # marker, and has 20480 bytes once the marker is written.
    with tarfile.open(tmpname, self.mode) as tar:
        t = tarfile.TarInfo("foo")
        t.size = tarfile.RECORDSIZE - tarfile.BLOCKSIZE
        tar.addfile(t, io.BytesIO(b"a" * t.size))

    with self.open(tmpname, "rb") as fobj:
        self.assertEqual(len(fobj.read()), tarfile.RECORDSIZE * 2)
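The same check can be reproduced outside the test fixture against an in-memory archive; this is a sketch, not part of CPython's test suite:

import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tar:
    info = tarfile.TarInfo("foo")
    info.size = tarfile.RECORDSIZE - tarfile.BLOCKSIZE   # 9728 bytes of data
    tar.addfile(info, io.BytesIO(b"a" * info.size))

# header (1 block) + data (19 blocks) fill one record exactly; the two-block
# EOF marker forces a second record, which is padded out to RECORDSIZE.
assert len(buf.getvalue()) == tarfile.RECORDSIZE * 2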
Example #4
Source File: walarchive.py From barman with GNU General Public License v3.0
def addfile(self, tarinfo, fileobj=None):
    """
    Add the provided fileobj to the tar using md5copyfileobj
    and saves the file md5 in the provided ChecksumTarInfo object.

    This method completely replaces TarFile.addfile()
    """
    self._check("aw")

    tarinfo = copy.copy(tarinfo)

    buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
    self.fileobj.write(buf)
    self.offset += len(buf)

    # If there's data to follow, append it.
    if fileobj is not None:
        tarinfo.data_checksum = md5copyfileobj(
            fileobj, self.fileobj, tarinfo.size)
        blocks, remainder = divmod(tarinfo.size, tarfile.BLOCKSIZE)
        if remainder > 0:
            self.fileobj.write(
                tarfile.NUL * (tarfile.BLOCKSIZE - remainder))
            blocks += 1
        self.offset += blocks * tarfile.BLOCKSIZE

    self.members.append(tarinfo)
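md5copyfileobj and ChecksumTarInfo are barman helpers that are not shown here. A hypothetical stand-in for the copy helper, sketched only to show what the call above relies on (the real implementation may differ):

import hashlib

def md5copyfileobj(src, dst, length, bufsize=64 * 1024):
    # Copy exactly `length` bytes from src to dst and return the MD5
    # hex digest of the copied data.
    md5 = hashlib.md5()
    remaining = length
    while remaining > 0:
        chunk = src.read(min(bufsize, remaining))
        if not chunk:
            raise IOError("unexpected end of source file")
        dst.write(chunk)
        md5.update(chunk)
        remaining -= len(chunk)
    return md5.hexdigest()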
Example #5
Source File: cloud.py From barman with GNU General Public License v3.0
def addfile(self, tarinfo, fileobj=None):
    """
    Add the provided fileobj to the tar
    ignoring truncated or vanished files.

    This method completely replaces TarFile.addfile()
    """
    self._check("awx")

    tarinfo = copy.copy(tarinfo)

    buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
    self.fileobj.write(buf)
    self.offset += len(buf)

    # If there's data to follow, append it.
    if fileobj is not None:
        copyfileobj_pad_truncate(fileobj, self.fileobj, tarinfo.size)
        blocks, remainder = divmod(tarinfo.size, tarfile.BLOCKSIZE)
        if remainder > 0:
            self.fileobj.write(
                tarfile.NUL * (tarfile.BLOCKSIZE - remainder))
            blocks += 1
        self.offset += blocks * tarfile.BLOCKSIZE

    self.members.append(tarinfo)
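copyfileobj_pad_truncate is likewise a barman helper that is not shown here; judging by its name and the docstring, it copies exactly tarinfo.size bytes and tolerates source files that shrank or vanished. A hypothetical sketch of that behaviour:

import tarfile

def copyfileobj_pad_truncate(src, dst, length, bufsize=64 * 1024):
    # Copy exactly `length` bytes from src to dst; if src turns out to be
    # shorter (truncated or vanished), pad the remainder with NUL bytes.
    remaining = length
    while remaining > 0:
        chunk = src.read(min(bufsize, remaining))
        if not chunk:
            dst.write(tarfile.NUL * remaining)
            return
        dst.write(chunk)
        remaining -= len(chunk)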
Example #6
Source File: autoclaving.py From pipeline with BSD 3-Clause "New" or "Revised" License
def tarfile_write_padding(tarfd, sz):
    blocks, remainder = divmod(sz, tarfile.BLOCKSIZE)
    if remainder > 0:
        tarfd.fileobj.write("\0" * (tarfile.BLOCKSIZE - remainder))
        blocks += 1
    tarfd.offset += blocks * tarfile.BLOCKSIZE
    assert tarfd.offset == tarfd.fileobj.tell()

# Exception interrupts stream processing, tail of the stream can't be recovered.
# Single blob may be malformed, but it's out of blob parser control.
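Note that this helper writes a str of NUL characters, which only works on Python 2; a bytes-based variant for Python 3 might look like this (hypothetical, not part of the pipeline project):

import tarfile

def tarfile_write_padding_py3(tarfd, sz):
    # Pad the underlying file up to the next block boundary and keep the
    # TarFile's bookkeeping offset in sync with the real file position.
    blocks, remainder = divmod(sz, tarfile.BLOCKSIZE)
    if remainder > 0:
        tarfd.fileobj.write(b"\0" * (tarfile.BLOCKSIZE - remainder))
        blocks += 1
    tarfd.offset += blocks * tarfile.BLOCKSIZE
    assert tarfd.offset == tarfd.fileobj.tell()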
Example #7
Source File: tarfile_write.py From pipeline with BSD 3-Clause "New" or "Revised" License
def process_tarfd(l, tarfd):
    for tin, blob in l:
        tin.size = len(blob)
        tarfd.addfile(tin)
        # copy-paste from tarfile.addfile
        tarfd.fileobj.write(blob)
        blocks, remainder = divmod(len(blob), tarfile.BLOCKSIZE)
        if remainder > 0:
            tarfd.fileobj.write('\0' * (tarfile.BLOCKSIZE - remainder))
            blocks += 1
        tarfd.offset += blocks * tarfile.BLOCKSIZE
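A self-contained sketch of the same header-then-raw-payload pattern, written with bytes so it runs on Python 3 (process_tarfd itself writes str blobs, i.e. Python 2 code):

import io
import tarfile

buf = io.BytesIO()
tarfd = tarfile.open(fileobj=buf, mode="w")

blob = b"x" * 700                      # 700 bytes -> 2 blocks once padded
tin = tarfile.TarInfo("blob")
tin.size = len(blob)

tarfd.addfile(tin)                     # writes only the 512-byte header
tarfd.fileobj.write(blob)              # write the payload ourselves...
blocks, remainder = divmod(len(blob), tarfile.BLOCKSIZE)
if remainder > 0:
    tarfd.fileobj.write(b"\0" * (tarfile.BLOCKSIZE - remainder))
    blocks += 1
tarfd.offset += blocks * tarfile.BLOCKSIZE   # ...and keep the offset in sync

tarfd.close()
# The archive is readable by the standard library again:
print(tarfile.open(fileobj=io.BytesIO(buf.getvalue())).getnames())   # ['blob']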
Example #8
Source File: test_tarfile.py From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0
def test_eof_marker(self):
    # Make sure an end of archive marker is written (two zero blocks).
    # tarfile insists on aligning archives to a 20 * 512 byte recordsize.
    # So, we create an archive that has exactly 10240 bytes without the
    # marker, and has 20480 bytes once the marker is written.
    with tarfile.open(tmpname, self.mode) as tar:
        t = tarfile.TarInfo("foo")
        t.size = tarfile.RECORDSIZE - tarfile.BLOCKSIZE
        tar.addfile(t, io.BytesIO(b"a" * t.size))

    with self.open(tmpname, "rb") as fobj:
        self.assertEqual(len(fobj.read()), tarfile.RECORDSIZE * 2)