Python snappy.StreamDecompressor() Examples
The following are 7 code examples of snappy.StreamDecompressor(). You can go to the original project or source file by following the links above each example.
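As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below) of how the streaming API fits together: snappy.StreamCompressor() produces a framed stream, and snappy.StreamDecompressor() consumes it incrementally, one chunk at a time.

import snappy

# Compress some data into snappy's framed streaming format.
compressor = snappy.StreamCompressor()
compressed = compressor.compress(b"hello " * 1000)

# Feed the compressed bytes back through a StreamDecompressor; in real code
# decompress() is usually called once per network or file chunk.
decompressor = snappy.StreamDecompressor()
data = decompressor.decompress(compressed)
decompressor.flush()  # raises snappy.UncompressError if the stream was truncated
assert data == b"hello " * 1000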
Example #1
Source File: request_responder.py From trinity with MIT License
async def _read_with_snappy(stream: INetStream, length: int) -> bytes:
    # Stream in compressed chunks and feed them through the decompressor
    # until `length` bytes of payload have been consumed.
    decompressor = StreamDecompressor()
    data = io.BytesIO()
    chunk_size = min(length, MAX_CHUNK_SIZE)
    chunk = await stream.read(chunk_size)
    remaining = length
    while chunk:
        chunk = decompressor.decompress(chunk)
        data.write(chunk)
        remaining -= len(chunk)
        if not remaining:
            break
        chunk = await stream.read(chunk_size)
    decompressor.flush()
    return data.getvalue()
Example #2
Source File: compression.py From filesystem_spec with BSD 3-Clause "New" or "Revised" License
def __init__(self, infile, mode, **kwargs):
    import snappy

    self.details = {"size": 999999999}  # not true, but OK if we don't seek
    super().__init__(fs=None, path="snappy", mode=mode.strip("b") + "b", **kwargs)
    self.infile = infile
    if "r" in mode:
        self.codec = snappy.StreamDecompressor()
    else:
        self.codec = snappy.StreamCompressor()
Example #3
Source File: snappy.py From gnsq with BSD 3-Clause "New" or "Revised" License
def __init__(self, socket):
    self._decompressor = snappy.StreamDecompressor()
    self._compressor = snappy.StreamCompressor()
    super(SnappySocket, self).__init__(socket)
Example #4
Source File: gigantum.py From gigantum-client with MIT License
async def get_object(self, session: aiohttp.ClientSession, progress_update_fn: Callable) -> None:
    """Method to get the object from S3 after the pre-signed URL has been obtained

    Args:
        session: The current aiohttp session
        progress_update_fn: A callable with arg "completed_bytes" (int) indicating how many
            bytes have been downloaded since last called

    Returns:
        None
    """
    try:
        decompressor = snappy.StreamDecompressor()
        timeout = aiohttp.ClientTimeout(total=None, connect=2 * 60, sock_connect=None, sock_read=5 * 60)
        async with session.get(self.presigned_s3_url, timeout=timeout) as response:
            if response.status != 200:
                # An error occurred
                body = await response.text()
                raise IOError(f"Failed to get {self.object_details.dataset_path} to storage backend."
                              f" Status: {response.status}. Response: {body}")

            async with aiofiles.open(self.object_details.object_path, 'wb') as fd:
                while True:
                    chunk = await response.content.read(self.download_chunk_size)
                    if not chunk:
                        # End of stream: write out anything left in the decompressor.
                        await fd.write(decompressor.flush())
                        break
                    decompressed_chunk = decompressor.decompress(chunk)
                    await fd.write(decompressed_chunk)
                    progress_update_fn(completed_bytes=len(decompressed_chunk))
    except Exception as err:
        logger.exception(err)
        raise IOError(f"Failed to get {self.object_details.dataset_path} from storage backend. {err}")
Example #5
Source File: compressor.py From pghoard with Apache License 2.0
def _create_decompressor(self, alg):
    if alg == "snappy":
        return snappy.StreamDecompressor()
    elif alg == "lzma":
        return lzma.LZMADecompressor()
    elif alg == "zstd":
        return zstd.ZstdDecompressor().decompressobj()
    raise InvalidConfigurationError("invalid compression algorithm: {!r}".format(alg))
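This factory works because all three returned objects expose a compatible decompress(data) method, so the calling code can stream chunks without knowing which algorithm is configured. A small illustrative sketch (the helper name and loop are hypothetical, not from pghoard):

def stream_decompress(decompressor, chunks):
    # `decompressor` can be any of the objects returned by the factory
    # above; each one implements decompress(), so this loop is
    # algorithm-agnostic.
    for chunk in chunks:
        yield decompressor.decompress(chunk)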
Example #6
Source File: snappyfile.py From pghoard with Apache License 2.0
def __init__(self, next_fp, mode):
    if snappy is None:
        raise io.UnsupportedOperation("Snappy is not available")

    if mode == "rb":
        self.decr = snappy.StreamDecompressor()
        self.encr = None
    elif mode == "wb":
        self.decr = None
        self.encr = snappy.StreamCompressor()
    else:
        raise io.UnsupportedOperation("unsupported mode for SnappyFile")

    super().__init__(next_fp)
    self.decr_done = False
Example #7
Source File: whisperbackup.py From whisper-backup with Apache License 2.0
def restore(script):
    # Build a list of metrics to restore from our object store and globbing
    metrics = search(script)

    # For each metric, find the date we want
    for i in metrics.keys():
        objs = metrics[i]
        d = findBackup(script, objs, script.options.date)

        logger.info("Restoring %s from timestamp %s" % (i, d))
        blobgz = script.store.get("%s%s/%s.wsp.%s"
                                  % (script.options.storage_path, i, d, script.options.algorithm))
        blobSHA = script.store.get("%s%s/%s.sha1"
                                   % (script.options.storage_path, i, d))

        if blobgz is None:
            logger.warning("Skipping missing file in object store: %s/%s.wsp.%s"
                           % (i, d, script.options.algorithm))
            continue

        # Decompress
        blobgz = StringIO(blobgz)
        blob = None
        if script.options.algorithm == "gz":
            fd = gzip.GzipFile(fileobj=blobgz, mode="rb")
            blob = fd.read()
            fd.close()
        elif script.options.algorithm == "sz":
            compressor = snappy.StreamDecompressor()
            blob = compressor.decompress(blobgz.getvalue())
            try:
                compressor.flush()
            except UncompressError as e:
                logger.error("Corrupt file in store: %s%s/%s.wsp.sz Error %s"
                             % (script.options.storage_path, i, d, str(e)))
                continue

        # Verify
        if blobSHA is None:
            logger.warning("Missing SHA1 checksum file...no verification")
        else:
            if hashlib.sha1(blob).hexdigest() != blobSHA:
                logger.warning("Backup does NOT verify, skipping metric %s" % i)
                continue

        heal(script, i, blob)

        # Clean up
        del blob
        blobgz.close()