Python shutil.copyfileobj() Examples
The following are 30 code examples of shutil.copyfileobj().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module shutil, or try the search function.
Example #1
Source File: can_haz_image.py From macops with Apache License 2.0 | 7 votes |
def DownloadFile(self, fileurl, dlfile):
    """Downloads a given file to a given path/filename.

    Args:
      fileurl: String with URL of file to download.
      dlfile: String with path of file to be written to.

    Raises:
      OSError: If file cannot be opened/written to, function raises OSError.
      URLError: If URL cannot be opened, function raises URLError.
    """
    # Re-download when the target is missing, or always for the temp index.
    if not os.path.isfile(dlfile) or dlfile == TMPINDEX:
        print('Downloading %s ...' % fileurl)
        file_to_dl = urllib2.urlopen(fileurl)
        # Context manager ensures the output file is flushed and closed even
        # if the copy fails; the original leaked the open handle.
        with open(dlfile, 'wb') as tmpfile:
            shutil.copyfileobj(file_to_dl, tmpfile)
    else:
        print('%s exists' % dlfile)
Example #2
Source File: LineApi.py From CyberTK-Self with GNU General Public License v2.0 | 7 votes |
def sendImageWithUrl(self, to_, url):
    """Send an image with given image url

    :param url: image url to send
    """
    # '%d' fixes the original '%1' typo that mangled the temp file name.
    path = '%s/pythonLine-%d.data' % (tempfile.gettempdir(), randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        # 'wb' (binary) is required for image bytes; the original opened the
        # file in text mode ('w'), which corrupts data and fails on Python 3.
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download image failure.')
    # The original wrapped this in `try: ... except Exception as e: raise e`,
    # which is a no-op; let the exception propagate naturally.
    self.sendImage(to_, path)
Example #3
Source File: talk.py From dpk with GNU General Public License v3.0 | 6 votes |
def sendImageWithURL2(self, to, url):
    """Send an image located at the given URL.

    :param url: image url to send
    """
    path = 'tmp/pythonLine.data'
    response = requests.get(url, stream=True)
    if response.status_code != 200:
        raise Exception('Download image failure.')
    # Stream the raw response body straight into the scratch file.
    with open(path, 'wb') as out_file:
        shutil.copyfileobj(response.raw, out_file)
    try:
        self.sendImage(to, path)
    except Exception as e:
        raise e
Example #4
Source File: image_helper.py From JJMumbleBot with GNU General Public License v3.0 | 6 votes |
def download_image_requests_to_dir(img_url, dir_name):
    # Download an image into the temp media directory under `dir_name`
    # (clearing that directory first) and convert PNG downloads to JPG.
    dir_utils.clear_directory(f'{dir_utils.get_temp_med_dir()}/{dir_name}')
    # NOTE(review): naive extension parse — breaks on URLs with query strings
    # or without a dot; confirm callers only pass plain file URLs.
    img_ext = img_url.rsplit('.', 1)[1]
    s = requests.Session()
    r = s.get(img_url)
    if r.status_code == 200:
        with open(f"{dir_utils.get_temp_med_dir()}/{dir_name}/_image.{img_ext}", 'wb') as f:
            # NOTE(review): the request is made without stream=True, so r.raw
            # may already be consumed here — verify the copy actually writes data.
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
        dprint(f"Downloaded image from: {img_url}")
    else:
        dprint(f"{r.status_code} Error! - {img_url}")
    if img_ext == 'png':
        # Re-encode the PNG as RGB JPG and delete the original download.
        dprint(f"Fixing image to force jpg conversion: {img_url}")
        img_fix = Image.open(f"{dir_utils.get_temp_med_dir()}/{dir_name}/_image.{img_ext}")
        img_fix.convert('RGB').save(f"{dir_utils.get_temp_med_dir()}/{dir_name}/_image.jpg")
        dir_utils.remove_file("_image.png", f'{dir_utils.get_temp_med_dir()}/{dir_name}')
        # NOTE(review): appears to duplicate the earlier success log — confirm
        # whether this belongs inside the PNG branch in the upstream source.
        dprint(f"Downloaded image from: {img_url}")
Example #5
Source File: topography.py From typhon with MIT License | 6 votes |
def download_tile(name):
    """Download and extract the tile with the given name.

    The data is stored in the path pointed to by the :code:`_data_path`
    attribute of the module.

    Args:
        name(str): The name of the tile to download.
    """
    base_url = "https://dds.cr.usgs.gov/srtm/version2_1/SRTM30"
    url = base_url + "/" + name + "/" + name + ".dem.zip"
    filename = os.path.join(_get_data_path(), name + ".dem.zip")
    # `with` closes the HTTP response even if the copy fails (the original
    # left it open); the redundant `path = os.path.join(filename)` no-op
    # was removed.
    with urllib.request.urlopen(url) as r:
        with open(filename, 'wb') as f:
            shutil.copyfileobj(r, f)
    # Extract the zip archive next to the download.
    with zipfile.ZipFile(filename, "r") as zip_ref:
        zip_ref.extractall(os.path.dirname(filename))
Example #6
Source File: chromecast-beam.py From pulseaudio-dlna with GNU General Public License v3.0 | 6 votes |
def do_GET(self):
    """Serve the requested media file, transcoded on the fly by VLC."""
    client_address = self.client_address[0]
    logger.info('Serving transcoded media file to {} ...'.format(
        client_address))
    self.send_head()
    path = self.translate_path(self.path)
    command = VLCEncoderSettings.command(path)
    logger.info('Launching {}'.format(command))
    # Pre-declare so the `finally` block cannot raise NameError when Popen
    # (or opening /dev/null) fails before the assignment — the original
    # would mask the real error in that case.
    encoder_process = None
    try:
        with open(os.devnull, 'w') as dev_null:
            encoder_process = subprocess.Popen(
                command, stdout=subprocess.PIPE, stderr=dev_null)
            # Stream the encoder's stdout directly to the HTTP client.
            shutil.copyfileobj(encoder_process.stdout, self.wfile)
    except Exception:
        # Clients dropping the connection mid-stream is expected; log only.
        logger.info('Connection from {} closed.'.format(client_address))
        logger.debug(traceback.format_exc())
    finally:
        if encoder_process is not None:
            pid = encoder_process.pid
            logger.info('Terminating process {}'.format(pid))
            try:
                os.kill(pid, signal.SIGKILL)
            except OSError:
                # Process already exited; nothing to clean up.
                pass
Example #7
Source File: dataset.py From dockerfiles with Apache License 2.0 | 6 votes |
def download(directory, filename):
    """Download (and unzip) a file from the MNIST dataset if not already done."""
    target_path = os.path.join(directory, filename)
    if tf.gfile.Exists(target_path):
        return target_path
    if not tf.gfile.Exists(directory):
        tf.gfile.MakeDirs(directory)
    # CVDF mirror of http://yann.lecun.com/exdb/mnist/
    url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
    _, compressed_path = tempfile.mkstemp(suffix='.gz')
    print('Downloading %s to %s' % (url, compressed_path))
    urllib.request.urlretrieve(url, compressed_path)
    # Inflate the gzip download into its final location, then drop the temp.
    with gzip.open(compressed_path, 'rb') as gz_in:
        with tf.gfile.Open(target_path, 'wb') as raw_out:
            shutil.copyfileobj(gz_in, raw_out)
    os.remove(compressed_path)
    return target_path
Example #8
Source File: dataset.py From dockerfiles with Apache License 2.0 | 6 votes |
def download(directory, filename):
    """Download (and unzip) a file from the MNIST dataset if not already done."""
    local_path = os.path.join(directory, filename)
    if tf.gfile.Exists(local_path):
        return local_path
    if not tf.gfile.Exists(directory):
        tf.gfile.MakeDirs(directory)
    # CVDF mirror of http://yann.lecun.com/exdb/mnist/
    url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
    _, gz_path = tempfile.mkstemp(suffix='.gz')
    print('Downloading %s to %s' % (url, gz_path))
    urllib.request.urlretrieve(url, gz_path)
    # Decompress into the final location and discard the temporary archive.
    with gzip.open(gz_path, 'rb') as src:
        with tf.gfile.Open(local_path, 'wb') as dst:
            shutil.copyfileobj(src, dst)
    os.remove(gz_path)
    return local_path
Example #9
Source File: cli.py From stdpopsim with GNU General Public License v3.0 | 6 votes |
def write_output(ts, args):
    """
    Adds provenance information to the specified tree sequence (ensuring that
    the output is reproducible) and write the resulting tree sequence to output.
    """
    tables = ts.dump_tables()
    logger.debug("Updating provenance")
    tables.provenances.add_row(json.dumps(get_provenance_dict()))
    ts = tables.tree_sequence()
    if args.output is not None:
        logger.debug(f"Writing to {args.output}")
        ts.dump(args.output)
        return
    # tskit cannot write straight to stdout, so stage through a tempfile
    # and relay its bytes onto the stdout buffer.
    with tempfile.TemporaryDirectory() as tmpdir:
        staging = pathlib.Path(tmpdir) / "tmp.trees"
        ts.dump(staging)
        with open(staging, "rb") as f:
            shutil.copyfileobj(f, sys.stdout.buffer)
Example #10
Source File: tarfile.py From jawfish with MIT License | 6 votes |
def copyfileobj(src, dst, length=None):
    """Copy `length` bytes from fileobj `src` to fileobj `dst`.

    If `length` is None, the entire remaining content is copied.  Raises
    IOError when `src` is exhausted before `length` bytes were transferred.
    """
    if length is None:
        shutil.copyfileobj(src, dst)
        return
    if length == 0:
        return
    # Plan the copy as a sequence of chunk sizes: full 16 KiB chunks
    # followed by one final short chunk for the remainder.
    chunk_size = 16 * 1024
    full_chunks, tail = divmod(length, chunk_size)
    chunk_plan = [chunk_size] * full_chunks
    if tail:
        chunk_plan.append(tail)
    for want in chunk_plan:
        data = src.read(want)
        if len(data) < want:
            # Source ended early — the caller asked for more than available.
            raise IOError("end of file reached")
        dst.write(data)
    return
Example #11
Source File: tarfile.py From jawfish with MIT License | 6 votes |
def addfile(self, tarinfo, fileobj=None):
    """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
       given, tarinfo.size bytes are read from it and added to the archive.
       You can create TarInfo objects using gettarinfo().
       On Windows platforms, `fileobj' should always be opened with mode
       'rb' to avoid irritation about the file size.
    """
    # Refuse to write unless the archive is open for appending/writing.
    self._check("aw")
    # Copy so mutations here never leak back into the caller's TarInfo.
    tarinfo = copy.copy(tarinfo)
    # Serialize and emit the member header, tracking the archive offset.
    buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
    self.fileobj.write(buf)
    self.offset += len(buf)
    # If there's data to follow, append it.
    if fileobj is not None:
        copyfileobj(fileobj, self.fileobj, tarinfo.size)
        # tar payloads are padded with NULs to a whole number of blocks.
        blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
        if remainder > 0:
            self.fileobj.write(NUL * (BLOCKSIZE - remainder))
            blocks += 1
        self.offset += blocks * BLOCKSIZE
    # Register the member so later reads/listings can find it.
    self.members.append(tarinfo)
Example #12
Source File: text.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def _get_data(self):
    # Ensure the raw data file for the current segment is present and
    # verified, downloading/extracting the archive if needed, then load it
    # into self._data / self._label.
    archive_file_name, archive_hash = self._archive_file
    data_file_name, data_hash = self._data_file[self._segment]
    path = os.path.join(self._root, data_file_name)
    # Re-download when the file is missing or fails its SHA-1 check.
    if not os.path.exists(path) or not check_sha1(path, data_hash):
        namespace = 'gluon/dataset/'+self._namespace
        downloaded_file_path = download(_get_repo_file_url(namespace, archive_file_name), path=self._root, sha1_hash=archive_hash)
        with zipfile.ZipFile(downloaded_file_path, 'r') as zf:
            # Flatten the archive: every member lands directly in _root
            # (directory entries have an empty basename and are skipped).
            for member in zf.namelist():
                filename = os.path.basename(member)
                if filename:
                    dest = os.path.join(self._root, filename)
                    with zf.open(member) as source, \
                            open(dest, "wb") as target:
                        shutil.copyfileobj(source, target)
    data, label = self._read_batch(path)
    # NOTE(review): assumes _read_batch returns arrays whose length is a
    # multiple of _seq_len so the (-1, _seq_len) reshape succeeds — confirm.
    self._data = nd.array(data, dtype=data.dtype).reshape((-1, self._seq_len))
    self._label = nd.array(label, dtype=label.dtype).reshape((-1, self._seq_len))
Example #13
Source File: aff4.py From pyaff4 with Apache License 2.0 | 6 votes |
def extractFromVolume(container_urn, volume, imageURNs, destFolder):
    """Extract the given image streams from an AFF4 volume.

    Each image is written under destFolder using its stored path name;
    passing destFolder == "-" streams the image bytes to stdout instead.
    """
    printVolumeInfo(container_urn.original_filename, volume)
    resolver = volume.resolver
    for imageUrn in imageURNs:
        imageUrn = utils.SmartUnicode(imageUrn)
        pathName = next(resolver.QuerySubjectPredicate(volume.urn, imageUrn, volume.lexicon.pathName))
        with resolver.AFF4FactoryOpen(imageUrn) as srcStream:
            if destFolder != "-":
                pathName = escaping.arnPathFragment_from_path(pathName.value)
                # Strip leading slashes so os.path.join cannot escape destFolder.
                while pathName.startswith("/"):
                    pathName = pathName[1:]
                destFile = os.path.join(destFolder, pathName)
                if not os.path.exists(os.path.dirname(destFile)):
                    try:
                        os.makedirs(os.path.dirname(destFile))
                    except OSError as exc:
                        # Guard against race condition
                        if exc.errno != errno.EEXIST:
                            raise
                with open(destFile, "wb") as destStream:
                    # 32 * 1024 fixes the original "32 * 2014" buffer-size typo.
                    shutil.copyfileobj(srcStream, destStream, length=32 * 1024)
                print("\tExtracted %s to %s" % (pathName, destFile))
            else:
                shutil.copyfileobj(srcStream, sys.stdout)
Example #14
Source File: fels.py From fetchLandsatSentinelFromGoogleCloud with MIT License | 6 votes |
def download_metadata_file(url, outputdir, program):
    """Download and unzip the catalogue files.

    Both steps are skipped when their output already exists; returns the
    path of the unzipped CSV index.
    """
    zipped_index_path = os.path.join(outputdir, 'index_' + program + '.csv.gz')
    if not os.path.isfile(zipped_index_path):
        if not os.path.exists(os.path.dirname(zipped_index_path)):
            os.makedirs(os.path.dirname(zipped_index_path))
        print("Downloading Metadata file...")
        content = urlopen(url)
        try:
            with open(zipped_index_path, 'wb') as f:
                shutil.copyfileobj(content, f)
        finally:
            # The original never closed the HTTP response.
            content.close()
    index_path = os.path.join(outputdir, 'index_' + program + '.csv')
    if not os.path.isfile(index_path):
        print("Unzipping Metadata file...")
        with gzip.open(zipped_index_path) as gzip_index, open(index_path, 'wb') as f:
            shutil.copyfileobj(gzip_index, f)
    return index_path
Example #15
Source File: auto_dataset.py From aetros-cli with MIT License | 6 votes |
def download_image(url, path):
    """Fetch `url` into `path`; returns True on success or if already present."""
    if os.path.exists(path):
        return True
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'
    }
    try:
        response = requests.get(url, stream=True, timeout=9, headers=headers)
        if response.status_code == 200:
            with open(path, 'wb') as out:
                response.raw.decode_content = True
                shutil.copyfileobj(response.raw, out)
            return True
        print(("Could not download image %s, response %d" % (url, response.status_code)))
    except Exception as e:
        # Some exception classes carry a .message attribute (legacy);
        # fall back to repr() otherwise.
        detail = e.message if hasattr(e, 'message') else repr(e)
        print(("Could not download image %s due to %s" % (url, detail)))
    return False
Example #16
Source File: dataset.py From cloudml-samples with Apache License 2.0 | 6 votes |
def download(directory, filename):
    """Download (and unzip) a file from the MNIST dataset if not already done."""
    out_path = os.path.join(directory, filename)
    if tf.gfile.Exists(out_path):
        return out_path
    if not tf.gfile.Exists(directory):
        tf.gfile.MakeDirs(directory)
    # CVDF mirror of http://yann.lecun.com/exdb/mnist/
    url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
    _, archive_path = tempfile.mkstemp(suffix='.gz')
    print('Downloading %s to %s' % (url, archive_path))
    urllib.request.urlretrieve(url, archive_path)
    # Unpack the gzip archive into its final home, then remove the temp file.
    with gzip.open(archive_path, 'rb') as packed:
        with tf.gfile.Open(out_path, 'wb') as unpacked:
            shutil.copyfileobj(packed, unpacked)
    os.remove(archive_path)
    return out_path
Example #17
Source File: file_util.py From snowflake-connector-python with Apache License 2.0 | 6 votes |
def compress_file_with_gzip(file_name, tmp_dir):
    """Compresses a file with GZIP.

    Args:
        file_name: Local path to file to be compressed.
        tmp_dir: Temporary directory where an GZIP file will be created.

    Returns:
        A tuple of gzip file name and size.
    """
    logger = getLogger(__name__)
    base_name = os.path.basename(file_name)
    gzip_file_name = os.path.join(tmp_dir, base_name + '_c.gz')
    logger.debug('gzip file: %s, original file: %s', gzip_file_name, file_name)
    # Context managers guarantee both handles are closed even when the copy
    # raises; the original leaked them on error.
    with open(file_name, 'rb') as fr, gzip.GzipFile(gzip_file_name, 'wb') as fw:
        shutil.copyfileobj(fr, fw)
    SnowflakeFileUtil.normalize_gzip_header(gzip_file_name)
    statinfo = os.stat(gzip_file_name)
    return gzip_file_name, statinfo.st_size
Example #18
Source File: mods.py From fac with MIT License | 5 votes |
def _extract_member(self, zipfile, arcname, dest):
    """Extract archive member `arcname` from `zipfile` to the path `dest`."""
    # Make sure every parent directory of the destination exists first.
    parent = os.path.dirname(dest)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    if arcname[-1] == '/':
        # Directory entry: materialize the directory itself and stop.
        if not os.path.isdir(dest):
            os.mkdir(dest)
        return
    with zipfile.open(arcname) as member, open(dest, 'wb') as sink:
        shutil.copyfileobj(member, sink)
Example #19
Source File: packages.py From arches with GNU Affero General Public License v3.0 | 5 votes |
def export_business_data(
    self, data_dest=None, file_format=None, config_file=None, graph=None, single_file=False,
):
    # Export resource-instance business data via ResourceExporter, writing
    # one output file per exported chunk into `data_dest`.  Invalid or
    # incomplete CLI arguments terminate the process with a message.
    try:
        resource_exporter = ResourceExporter(file_format, configs=config_file, single_file=single_file)
    except KeyError as e:
        # Unknown file format keys surface as KeyError from the exporter.
        utils.print_message("{0} is not a valid export file format.".format(file_format))
        sys.exit()
    except MissingConfigException as e:
        utils.print_message("No mapping file specified. Please rerun this command with the '-c' parameter populated.")
        sys.exit()
    if data_dest != "":
        try:
            data = resource_exporter.export(graph_id=graph, resourceinstanceids=None)
        except MissingGraphException as e:
            print(utils.print_message("No resource graph specified. Please rerun this command with the '-g' parameter populated."))
            sys.exit()
        # Each export entry carries a display name and an in-memory buffer;
        # rewind the buffer and stream it to disk in 16 KiB chunks.
        for file in data:
            with open(os.path.join(data_dest, file["name"]), "w") as f:
                bufsize = 16 * 1024
                file["outputfile"].seek(0)
                shutil.copyfileobj(file["outputfile"], f, bufsize)
        # with open(os.path.join(data_dest, file['name']), 'wb') as f:
        #     f.write(file['outputfile'].getvalue())
    else:
        utils.print_message("No destination directory specified. Please rerun this command with the '-d' parameter populated.")
        sys.exit()
Example #20
Source File: models.py From dpk with GNU General Public License v3.0 | 5 votes |
def saveFile(self, path, raw):
    """Persist a raw binary file-like stream to `path`."""
    with open(path, 'wb') as sink:
        shutil.copyfileobj(raw, sink)
Example #21
Source File: aff4.py From pyaff4 with Apache License 2.0 | 5 votes |
def extractAllFromVolume(container_urn, volume, destFolder):
    # Extract every FileImage stream in the AFF4 volume into destFolder,
    # restoring stored timestamps; destFolder == "-" streams to stdout.
    printVolumeInfo(container_urn.original_filename, volume)
    resolver = volume.resolver
    for imageUrn in resolver.QueryPredicateObject(volume.urn, lexicon.AFF4_TYPE, lexicon.standard11.FileImage):
        imageUrn = utils.SmartUnicode(imageUrn)
        pathName = next(resolver.QuerySubjectPredicate(volume.urn, imageUrn, lexicon.standard11.pathName)).value
        # Re-anchor absolute stored paths under destFolder.
        if pathName.startswith("/"):
            pathName = "." + pathName
        with resolver.AFF4FactoryOpen(imageUrn) as srcStream:
            if destFolder != "-":
                destFile = os.path.join(destFolder, pathName)
                if not os.path.exists(os.path.dirname(destFile)):
                    try:
                        os.makedirs(os.path.dirname(destFile))
                    except OSError as exc:
                        # Guard against race condition
                        if exc.errno != errno.EEXIST:
                            raise
                with open(destFile, "wb") as destStream:
                    shutil.copyfileobj(srcStream, destStream)
                print("\tExtracted %s to %s" % (pathName, destFile))
                # Restore the four stored timestamps (any may be absent).
                lastWritten = nextOrNone(
                    resolver.QuerySubjectPredicate(volume.urn, imageUrn, lexicon.standard11.lastWritten))
                lastAccessed = nextOrNone(
                    resolver.QuerySubjectPredicate(volume.urn, imageUrn, lexicon.standard11.lastAccessed))
                recordChanged = nextOrNone(
                    resolver.QuerySubjectPredicate(volume.urn, imageUrn, lexicon.standard11.recordChanged))
                birthTime = nextOrNone(
                    resolver.QuerySubjectPredicate(volume.urn, imageUrn, lexicon.standard11.birthTime))
                logical.resetTimestamps(destFile, lastWritten, lastAccessed, recordChanged, birthTime)
            else:
                shutil.copyfileobj(srcStream, sys.stdout)
Example #22
Source File: SortSparseMatrix.py From single_cell_portal with BSD 3-Clause "New" or "Revised" License | 5 votes |
def gunzip_shutil(source_filepath, dest_filepath, block_size=65536):
    """Decompress the gzip file at `source_filepath` into `dest_filepath`.

    `block_size` controls the copy chunk size in bytes.
    """
    with gzip.open(source_filepath, 'rb') as compressed:
        with open(dest_filepath, 'wb') as plain:
            shutil.copyfileobj(compressed, plain, block_size)
Example #23
Source File: download_data.py From CapsLayer with Apache License 2.0 | 5 votes |
def download_and_uncompress_zip(URL, dataset_dir, force=False):
    '''
    Args:
        URL: the download links for data
        dataset_dir: the path to save data
        force: redownload data
    '''
    filename = URL.split('/')[-1]
    filepath = os.path.join(dataset_dir, filename)
    if not os.path.exists(dataset_dir):
        # makedirs (not mkdir) so nested dataset paths work; this also
        # matches the sibling implementation in download_utils.py.
        os.makedirs(dataset_dir)
    extract_to = os.path.splitext(filepath)[0]

    def download_progress(count, block_size, total_size):
        # Progress callback for urlretrieve: rewrite one status line in place.
        sys.stdout.write("\r>> Downloading %s %.1f%%" % (filename, float(count * block_size) / float(total_size) * 100.))
        sys.stdout.flush()

    if not force and os.path.exists(filepath):
        print("file %s already exist" % (filename))
    else:
        filepath, _ = urllib.request.urlretrieve(URL, filepath, download_progress)
        print()
        print('Successfully Downloaded', filename)

    # with zipfile.ZipFile(filepath) as fd:
    with gzip.open(filepath, 'rb') as f_in, open(extract_to, 'wb') as f_out:
        print('Extracting ', filename)
        shutil.copyfileobj(f_in, f_out)
        print('Successfully extracted')
        print()
Example #24
Source File: download_utils.py From CapsLayer with Apache License 2.0 | 5 votes |
def download_and_uncompress_zip(URL, dataset_dir, force=False):
    '''
    Args:
        URL: the download links for data
        dataset_dir: the path to save data
        force: redownload data
    '''
    filename = URL.split('/')[-1]
    filepath = os.path.join(dataset_dir, filename)
    if not os.path.exists(dataset_dir):
        os.makedirs(dataset_dir)
    extract_to = os.path.splitext(filepath)[0]

    def download_progress(count, block_size, total_size):
        # In-place progress line for urlretrieve.
        percent = float(count * block_size) / float(total_size) * 100.
        sys.stdout.write("\r>> Downloading %s %.1f%%" % (filename, percent))
        sys.stdout.flush()

    # Early exit: nothing to do when the archive is already on disk.
    if not force and os.path.exists(filepath):
        print("file %s already exist" % (filename))
        return 0
    filepath, _ = urllib.request.urlretrieve(URL, filepath, download_progress)
    print()
    print('Successfully Downloaded', filename)
    # with zipfile.ZipFile(filepath) as fd:
    with gzip.open(filepath, 'rb') as f_in, open(extract_to, 'wb') as f_out:
        print('Extracting ', filename)
        shutil.copyfileobj(f_in, f_out)
        print('Successfully extracted')
        print()
Example #25
Source File: archive.py From mquery with GNU Affero General Public License v3.0 | 5 votes |
def filter(self, orig_name: str, file_path: str) -> Optional[str]:
    """Inflate gzip-named inputs into a scratch file; pass others through.

    Returns the path that should actually be scanned.  The scratch file is
    tracked on self.tmpfiles so it lives until the owner cleans up.
    """
    scratch = tempfile.NamedTemporaryFile()
    self.tmpfiles.append(scratch)
    if not orig_name.endswith(".gz"):
        return file_path
    with gzip.open(file_path, "rb") as compressed:
        with open(scratch.name, "wb") as inflated:
            shutil.copyfileobj(compressed, inflated)
    return scratch.name
Example #26
Source File: utils.py From dataiku-contrib with Apache License 2.0 | 5 votes |
def download_trained_weights(coco_model_path, verbose=1):
    """Download COCO trained weights from Releases.

    coco_model_path: local path of COCO trained weights
    """
    if verbose > 0:
        print("Downloading pretrained model to " + coco_model_path + " ...")
    with urllib.request.urlopen(COCO_MODEL_URL) as resp:
        with open(coco_model_path, 'wb') as out:
            shutil.copyfileobj(resp, out)
    if verbose > 0:
        print("... done downloading pretrained model!")
Example #27
Source File: fs-provider.py From dataiku-contrib with Apache License 2.0 | 5 votes |
def read(self, path, stream, limit):
    """Stream the ADLS file at `path` into `stream` (`limit` is unused)."""
    adls_path = self.get_adls_lnt_path(path)
    if not self.adls_client.exists(adls_path):
        raise Exception('Path doesn t exist : %s' % adls_path)
    with self.adls_client.open(adls_path, 'rb') as handle:
        shutil.copyfileobj(handle, stream)
Example #28
Source File: sagemaker_pipe.py From sagemaker-xgboost-container with Apache License 2.0 | 5 votes |
def gunzip(src_retriever, tmp_path, sink):
    """Stage the gzip payload from `src_retriever` at `tmp_path`, then
    inflate it into `sink`."""
    with open(tmp_path, 'wb') as staging:
        src_retriever(staging)
    with gzip.open(tmp_path, 'rb') as inflated:
        shutil.copyfileobj(inflated, sink)
Example #29
Source File: sagemaker_pipe.py From sagemaker-xgboost-container with Apache License 2.0 | 5 votes |
def local_retriever(src, sink):
    """Stream `src` — a single file, or every regular file under a
    directory tree — into the writable `sink`."""
    if os.path.isfile(src):
        logging.debug('streaming file: {}'.format(src))
        with open(src, 'rb') as fh:
            shutil.copyfileobj(fh, sink)
        return
    for root, dirs, files in os.walk(src):
        logging.debug('file list: {}'.format(files))
        for name in files:
            candidate = root + '/' + name
            logging.debug('streaming file: {}'.format(candidate))
            if not os.path.isfile(candidate):
                # ignore special files
                continue
            with open(candidate, 'rb') as fh:
                shutil.copyfileobj(fh, sink)
Example #30
Source File: instaloader.py From instaloader with MIT License | 5 votes |
def save_caption(self, filename: str, mtime: datetime, caption: str) -> None:
    """Updates picture caption / Post metadata info"""
    def _elliptify(caption):
        # Collapse to one line and clamp to 31 chars with an ellipsis,
        # wrapped in brackets for compact console logging.
        pcaption = caption.replace('\n', ' ').strip()
        return '[' + ((pcaption[:29] + u"\u2026") if len(pcaption) > 31 else pcaption) + ']'
    filename += '.txt'
    caption += '\n'
    pcaption = _elliptify(caption)
    bcaption = caption.encode("UTF-8")
    # If no previous caption file exists, suppress() skips straight to the
    # plain write at the bottom.
    with suppress(FileNotFoundError):
        with open(filename, 'rb') as file:
            file_caption = file.read()
        # Compare with line endings normalized so CRLF files don't look changed.
        if file_caption.replace(b'\r\n', b'\n') == bcaption.replace(b'\r\n', b'\n'):
            try:
                self.context.log(pcaption + ' unchanged', end=' ', flush=True)
            except UnicodeEncodeError:
                # Console encoding can't render the caption; log a placeholder.
                self.context.log('txt unchanged', end=' ', flush=True)
            return None
        else:
            # Caption changed: rotate older versions to name_old_NN.txt,
            # shifting every existing backup up by one index.
            def get_filename(index):
                return filename if index == 0 else '{0}_old_{2:02}{1}'.format(*os.path.splitext(filename), index)
            i = 0
            while os.path.isfile(get_filename(i)):
                i = i + 1
            for index in range(i, 0, -1):
                os.rename(get_filename(index - 1), get_filename(index))
            try:
                self.context.log(_elliptify(file_caption.decode("UTF-8")) + ' updated', end=' ', flush=True)
            except UnicodeEncodeError:
                self.context.log('txt updated', end=' ', flush=True)
    try:
        self.context.log(pcaption, end=' ', flush=True)
    except UnicodeEncodeError:
        self.context.log('txt', end=' ', flush=True)
    # Write the new caption and stamp the file with the post's mtime.
    with open(filename, 'wb') as text_file:
        shutil.copyfileobj(BytesIO(bcaption), text_file)
    os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))