Python gluoncv.utils.download() Examples
The following are 30 code examples of gluoncv.utils.download().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module
gluoncv.utils, or try the search function.
Example #1
Source File: coco_tracking.py From gluon-cv with Apache License 2.0 | 6 votes |
def download_coco(args, overwrite=False):
    """Download the COCO 2017 archives and unzip them into ``args.download_dir``."""
    _DOWNLOAD_URLS = [
        ('http://images.cocodataset.org/zips/train2017.zip',
         '10ad623668ab00c62c096f0ed636d6aff41faca5'),
        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
         '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
        ('http://images.cocodataset.org/zips/val2017.zip',
         '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
    ]
    # create the destination directory only if it does not exist yet
    if not os.path.isdir(args.download_dir):
        makedirs(args.download_dir)
    for url, checksum in _DOWNLOAD_URLS:
        # download() verifies the SHA-1 and returns the local archive path
        archive = download(url, path=args.download_dir, overwrite=overwrite,
                           sha1_hash=checksum)
        with zipfile.ZipFile(archive) as zf:
            zf.extractall(path=args.download_dir)
Example #2
Source File: imagenet.py From panoptic-fpn-gluon with Apache License 2.0 | 6 votes |
def parse_args():
    """Parse command-line options for preparing the ImageNet dataset."""
    parser = argparse.ArgumentParser(
        description='Setup the ImageNet dataset.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', required=True,
                        help="The directory that contains downloaded tar files")
    parser.add_argument('--target-dir', default=_TARGET_DIR,
                        help="The directory to store extracted images")
    parser.add_argument('--checksum', action='store_true',
                        help="If check integrity before extracting.")
    parser.add_argument('--with-rec', action='store_true',
                        help="If build image record files.")
    parser.add_argument('--num-thread', type=int, default=1,
                        help="Number of threads to use when building image record file.")
    return parser.parse_args()
Example #3
Source File: pascal_voc.py From panoptic-fpn-gluon with Apache License 2.0 | 6 votes |
def download_aug(path, overwrite=False):
    """Download the SBD (VOC augmented segmentation) archive and set it up under ``path``."""
    _AUG_DOWNLOAD_URLS = [
        ('http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz',
         '7129e0a480c2d6afb02b517bb18ac54283bfaa35')]
    makedirs(path)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        archive = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # unpack the tarball in place
        with tarfile.open(archive) as tar:
            tar.extractall(path=path)
        # the archive unpacks as benchmark_RELEASE; rename to VOCaug
        shutil.move(os.path.join(path, 'benchmark_RELEASE'),
                    os.path.join(path, 'VOCaug'))
        split_files = ['VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt']
        # concatenate the train/val split lists into a single trainval.txt
        with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'), 'w') as outfile:
            for split in split_files:
                with open(os.path.join(path, split)) as infile:
                    for line in infile:
                        outfile.write(line)
Example #4
Source File: pascal_voc.py From panoptic-fpn-gluon with Apache License 2.0 | 6 votes |
def download_voc(path, overwrite=False):
    """Download and extract the Pascal VOC 2007/2012 archives into ``path``."""
    _DOWNLOAD_URLS = [
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
         '34ed68851bce2a36e2a223fa52c661d592c66b3c'),
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
         '41a8d6e12baa5ab18ee7f8f8029b9e11805b4ef1'),
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
         '4e443f8a2eca6b1dac8a6c57641b67dd40621a49')]
    makedirs(path)
    for url, checksum in _DOWNLOAD_URLS:
        # download() verifies the SHA-1 and returns the local archive path
        archive = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # unpack each verified tarball next to where it was saved
        with tarfile.open(archive) as tar:
            tar.extractall(path=path)

#####################################################################################
# Download and extract the VOC augmented segmentation dataset into ``path``
Example #5
Source File: cityscapes.py From panoptic-fpn-gluon with Apache License 2.0 | 6 votes |
def download_city(path, overwrite=False):
    """Verify and extract the pre-downloaded Cityscapes archives into ``path``.

    Cityscapes requires registration, so the zips cannot be fetched here;
    this only checks their SHA-1 and unpacks them.
    NOTE(review): archives are opened by bare filename, i.e. relative to the
    current working directory rather than ``download_dir`` — confirm intended.
    """
    _CITY_DOWNLOAD_URLS = [
        ('gtFine_trainvaltest.zip', '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'),
        ('leftImg8bit_trainvaltest.zip', '2c0b77ce9933cc635adda307fbba5566f5d9d404')]
    download_dir = os.path.join(path, 'downloads')
    makedirs(download_dir)
    for filename, checksum in _CITY_DOWNLOAD_URLS:
        if not check_sha1(filename, checksum):
            raise UserWarning('File {} is downloaded but the content hash does not match. '
                              'The repo may be outdated or download may be incomplete. '
                              'If the "repo_url" is overridden, consider switching to '
                              'the default repo.'.format(filename))
        # checksum passed — unzip into the dataset root
        with zipfile.ZipFile(filename, "r") as zip_ref:
            zip_ref.extractall(path=path)
        print("Extracted", filename)
Example #6
Source File: prepare_coco.py From mxnet-centernet with MIT License | 6 votes |
def download_coco(path, overwrite=False):
    """Download the COCO 2017 train/val images and annotations into ``path``."""
    _DOWNLOAD_URLS = [
        ('http://images.cocodataset.org/zips/train2017.zip',
         '10ad623668ab00c62c096f0ed636d6aff41faca5'),
        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
         '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
        ('http://images.cocodataset.org/zips/val2017.zip',
         '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
        # optional extras, disabled by default:
        # ('http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',
        #  '46cdcf715b6b4f67e980b529534e79c2edffe084'),
        # test2017.zip, for those who want to attend the competition.
        # ('http://images.cocodataset.org/zips/test2017.zip',
        #  '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'),
    ]
    makedirs(path)
    for url, checksum in _DOWNLOAD_URLS:
        archive = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # unzip each verified archive next to where it was saved
        with zipfile.ZipFile(archive) as zf:
            zf.extractall(path=path)
Example #7
Source File: pascal_voc.py From MXNet-Deep-Learning-in-Action with Apache License 2.0 | 6 votes |
def download_voc(path, overwrite=False):
    """Download and extract the Pascal VOC 2007/2012 archives into ``path``."""
    _DOWNLOAD_URLS = [
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
         '34ed68851bce2a36e2a223fa52c661d592c66b3c'),
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
         '41a8d6e12baa5ab18ee7f8f8029b9e11805b4ef1'),
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
         '4e443f8a2eca6b1dac8a6c57641b67dd40621a49')]
    makedirs(path)
    for url, checksum in _DOWNLOAD_URLS:
        archive = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # unpack each verified tarball in place
        with tarfile.open(archive) as tar:
            tar.extractall(path=path)

#####################################################################################
# Download and extract the VOC augmented segmentation dataset into ``path``
Example #8
Source File: ilsvrc_vid.py From gluon-cv with Apache License 2.0 | 6 votes |
def main(args):
    """End-to-end preparation of the ILSVRC2015 VID dataset for tracking.

    Downloads the dataset, restructures it via symlinks, converts the XML
    annotations to JSON, crops sequences, and writes the final tracking JSON.
    All heavy lifting happens in helpers defined elsewhere in this file.
    """
    # download VID dataset
    download_VID(args)
    print('VID dataset has already download completed')
    VID_base_path = os.path.join(args.download_dir, 'ILSVRC2015')
    ann_base_path = os.path.join(VID_base_path, 'Annotations/VID/train/')
    # restructure the extracted tree with symlinks before parsing
    symlink(args)
    # Format XML and save it in JSON
    parse_vid(ann_base_path, args)
    print('VID dataset json has already generat completed')
    # crop VID dataset for prepare for tracking
    par_crop(args, ann_base_path)
    print('VID dataset has already crop completed')
    # generat VID json for prepare for tracking
    gen_json(args)
    print('VID dataset has already generat completed')
Example #9
Source File: pascal_voc.py From MXNet-Deep-Learning-in-Action with Apache License 2.0 | 6 votes |
def download_aug(path, overwrite=False):
    """Download the SBD (VOC augmented segmentation) archive and set it up under ``path``."""
    _AUG_DOWNLOAD_URLS = [
        ('http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz',
         '7129e0a480c2d6afb02b517bb18ac54283bfaa35')]
    makedirs(path)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        archive = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # unpack the tarball in place
        with tarfile.open(archive) as tar:
            tar.extractall(path=path)
        # the archive unpacks as benchmark_RELEASE; rename to VOCaug
        shutil.move(os.path.join(path, 'benchmark_RELEASE'),
                    os.path.join(path, 'VOCaug'))
        split_files = ['VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt']
        # concatenate the train/val split lists into a single trainval.txt
        with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'), 'w') as outfile:
            for split in split_files:
                with open(os.path.join(path, split)) as infile:
                    for line in infile:
                        outfile.write(line)
Example #10
Source File: imagenet.py From MXNet-Deep-Learning-in-Action with Apache License 2.0 | 6 votes |
def parse_args():
    """Parse command-line options for preparing the ImageNet dataset."""
    parser = argparse.ArgumentParser(
        description='Setup the ImageNet dataset.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', required=True,
                        help="The directory that contains downloaded tar files")
    parser.add_argument('--target-dir', default=_TARGET_DIR,
                        help="The directory to store extracted images")
    parser.add_argument('--checksum', action='store_true',
                        help="If check integrity before extracting.")
    parser.add_argument('--with-rec', action='store_true',
                        help="If build image record files.")
    parser.add_argument('--num-thread', type=int, default=1,
                        help="Number of threads to use when building image record file.")
    return parser.parse_args()
Example #11
Source File: imagenet.py From gluon-cv with Apache License 2.0 | 6 votes |
def parse_args():
    """Parse command-line options for preparing the ImageNet dataset."""
    parser = argparse.ArgumentParser(
        description='Setup the ImageNet dataset.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', required=True,
                        help="The directory that contains downloaded tar files")
    parser.add_argument('--target-dir', default=_TARGET_DIR,
                        help="The directory to store extracted images")
    parser.add_argument('--checksum', action='store_true',
                        help="If check integrity before extracting.")
    parser.add_argument('--with-rec', action='store_true',
                        help="If build image record files.")
    parser.add_argument('--num-thread', type=int, default=1,
                        help="Number of threads to use when building image record file.")
    return parser.parse_args()
Example #12
Source File: pascal_voc.py From gluon-cv with Apache License 2.0 | 6 votes |
def download_aug(path, overwrite=False):
    """Download the SBD (VOC augmented segmentation) archive and set it up under ``path``."""
    _AUG_DOWNLOAD_URLS = [
        ('http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz',
         '7129e0a480c2d6afb02b517bb18ac54283bfaa35')]
    makedirs(path)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        archive = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # unpack the tarball in place
        with tarfile.open(archive) as tar:
            tar.extractall(path=path)
        # the archive unpacks as benchmark_RELEASE; rename to VOCaug
        shutil.move(os.path.join(path, 'benchmark_RELEASE'),
                    os.path.join(path, 'VOCaug'))
        split_files = ['VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt']
        # concatenate the train/val split lists into a single trainval.txt
        with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'), 'w') as outfile:
            for split in split_files:
                with open(os.path.join(path, split)) as infile:
                    for line in infile:
                        outfile.write(line)
Example #13
Source File: pascal_voc.py From gluon-cv with Apache License 2.0 | 6 votes |
def download_voc(path, overwrite=False):
    """Download and extract the Pascal VOC 2007/2012 archives into ``path``."""
    _DOWNLOAD_URLS = [
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
         '34ed68851bce2a36e2a223fa52c661d592c66b3c'),
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
         '41a8d6e12baa5ab18ee7f8f8029b9e11805b4ef1'),
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
         '4e443f8a2eca6b1dac8a6c57641b67dd40621a49')]
    makedirs(path)
    for url, checksum in _DOWNLOAD_URLS:
        archive = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # unpack each verified tarball in place
        with tarfile.open(archive) as tar:
            tar.extractall(path=path)

#####################################################################################
# Download and extract the VOC augmented segmentation dataset into ``path``
Example #14
Source File: prepare_visualgenome.py From dgl with Apache License 2.0 | 6 votes |
def download_vg(path, overwrite=False):
    """Download the Visual Genome image archives and merge all images under VG_100K."""
    _DOWNLOAD_URLS = [
        ('https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip',
         'a055367f675dd5476220e9b93e4ca9957b024b94'),
        ('https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip',
         '2add3aab77623549e92b7f15cda0308f50b64ecf'),
    ]
    makedirs(path)
    for url, checksum in _DOWNLOAD_URLS:
        archive = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # extract
        if archive.endswith('zip'):
            with zipfile.ZipFile(archive) as zf:
                zf.extractall(path=path)
    # move all images into folder `VG_100K`
    vg_100k_path = os.path.join(path, 'VG_100K')
    vg_100k_2_path = os.path.join(path, 'VG_100K_2')
    for entry in os.listdir(vg_100k_2_path):
        shutil.move(os.path.join(vg_100k_2_path, entry),
                    os.path.join(vg_100k_path, entry))
Example #15
Source File: cityscapes.py From gluon-cv with Apache License 2.0 | 6 votes |
def download_city(path, overwrite=False):
    """Verify and extract the pre-downloaded Cityscapes archives into ``path``.

    Cityscapes requires registration, so the zips cannot be fetched here;
    this only checks their SHA-1 and unpacks them.
    NOTE(review): archives are opened by bare filename, i.e. relative to the
    current working directory rather than ``download_dir`` — confirm intended.
    """
    _CITY_DOWNLOAD_URLS = [
        ('gtFine_trainvaltest.zip', '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'),
        ('leftImg8bit_trainvaltest.zip', '2c0b77ce9933cc635adda307fbba5566f5d9d404')]
    download_dir = os.path.join(path, 'downloads')
    makedirs(download_dir)
    for filename, checksum in _CITY_DOWNLOAD_URLS:
        if not check_sha1(filename, checksum):
            raise UserWarning('File {} is downloaded but the content hash does not match. '
                              'The repo may be outdated or download may be incomplete. '
                              'If the "repo_url" is overridden, consider switching to '
                              'the default repo.'.format(filename))
        # checksum passed — unzip into the dataset root
        with zipfile.ZipFile(filename, "r") as zip_ref:
            zip_ref.extractall(path=path)
        print("Extracted", filename)
Example #16
Source File: demo.py From gluon-cv with Apache License 2.0 | 6 votes |
def read_data(opt):
    """Collect the frames on which tracking will run.

    Two sources are supported: when ``opt.video_loader`` is truthy, the video
    at ``opt.video_path`` is downloaded and decoded frame by frame; otherwise
    every image under ``opt.data_dir`` (sorted by filename) is loaded.
    The first frame's object coordinates are supplied separately via
    ``opt.gt_bbox`` as bbox(center_x, center_y, weight, height).
    """
    video_frames = []
    if opt.video_loader:
        # fetch the clip locally, then decode with OpenCV
        local_video = utils.download(opt.video_path)
        cap = cv2.VideoCapture(local_video)
        while True:
            ok, frame = cap.read()
            if not ok:
                # end of stream
                break
            video_frames.append(frame)
    else:
        for name in sorted(os.listdir(opt.data_dir)):
            video_frames.append(cv2.imread(os.path.join(opt.data_dir, name)))
    return video_frames
Example #17
Source File: mscoco.py From cascade_rcnn_gluon with Apache License 2.0 | 6 votes |
def download_coco(path, overwrite=False):
    """Download the COCO 2017 images, instance and stuff annotations into ``path``."""
    _DOWNLOAD_URLS = [
        ('http://images.cocodataset.org/zips/train2017.zip',
         '10ad623668ab00c62c096f0ed636d6aff41faca5'),
        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
         '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
        ('http://images.cocodataset.org/zips/val2017.zip',
         '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
        ('http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',
         'e7aa0f7515c07e23873a9f71d9095b06bcea3e12'),
        # test2017.zip, for those who want to attend the competition.
        # ('http://images.cocodataset.org/zips/test2017.zip',
        #  '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'),
    ]
    makedirs(path)
    for url, checksum in _DOWNLOAD_URLS:
        archive = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # unzip each verified archive next to where it was saved
        with zipfile.ZipFile(archive) as zf:
            zf.extractall(path=path)
Example #18
Source File: market1501.py From gluon-cv with Apache License 2.0 | 6 votes |
def main():
    """Download (unless disabled), extract and index the Market-1501 dataset.

    With ``--no-download`` set, the archive (or an already-extracted copy)
    must be present under the download directory, otherwise a ValueError is
    raised. ``extract`` and ``make_list`` are helpers defined elsewhere.
    """
    args = parse_args()
    name = "Market-1501-v15.09.15"
    url = "http://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/{name}.zip".format(name=name)
    root = osp.expanduser(args.download_dir)
    makedirs(root)
    fpath = osp.join(root, name + '.zip')      # local archive path
    exdir = osp.join(root, name)               # expected extraction directory
    # nothing local and downloading disabled -> cannot proceed
    if not osp.exists(fpath) and not osp.isdir(exdir) and args.no_download:
        raise ValueError(('{} dataset archive not found, make sure it is present.'
                          ' Or you should not disable "--no-download" to grab it'.format(fpath)))
    # Download by default
    if not args.no_download:
        print('Downloading dataset')
        download(url, fpath, overwrite=False)
        print('Dataset downloaded')
    # Extract dataset if fresh copy downloaded or existing archive is yet to be extracted
    if not args.no_download or not osp.isdir(exdir):
        extract(fpath, root)
        make_list(exdir)
Example #19
Source File: pascal_voc.py From cascade_rcnn_gluon with Apache License 2.0 | 6 votes |
def download_aug(path, overwrite=False):
    """Download the SBD (VOC augmented segmentation) archive and set it up under ``path``."""
    _AUG_DOWNLOAD_URLS = [
        ('http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz',
         '7129e0a480c2d6afb02b517bb18ac54283bfaa35')]
    makedirs(path)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        archive = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # unpack the tarball in place
        with tarfile.open(archive) as tar:
            tar.extractall(path=path)
        # the archive unpacks as benchmark_RELEASE; rename to VOCaug
        shutil.move(os.path.join(path, 'benchmark_RELEASE'),
                    os.path.join(path, 'VOCaug'))
        split_files = ['VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt']
        # concatenate the train/val split lists into a single trainval.txt
        with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'), 'w') as outfile:
            for split in split_files:
                with open(os.path.join(path, split)) as infile:
                    for line in infile:
                        outfile.write(line)
Example #20
Source File: imagenet.py From cascade_rcnn_gluon with Apache License 2.0 | 6 votes |
def parse_args():
    """Parse command-line options for preparing the ImageNet dataset."""
    parser = argparse.ArgumentParser(
        description='Setup the ImageNet dataset.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', required=True,
                        help="The directory that contains downloaded tar files")
    parser.add_argument('--target-dir', default=_TARGET_DIR,
                        help="The directory to store extracted images")
    parser.add_argument('--checksum', action='store_true',
                        help="If check integrity before extracting.")
    parser.add_argument('--with-rec', action='store_true',
                        help="If build image record files.")
    parser.add_argument('--num-thread', type=int, default=1,
                        help="Number of threads to use when building image record file.")
    return parser.parse_args()
Example #21
Source File: imagenet.py From MMD-GAN with Apache License 2.0 | 6 votes |
def parse_args():
    """Parse command-line options for preparing the ImageNet dataset."""
    parser = argparse.ArgumentParser(
        description='Setup the ImageNet dataset.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', required=True,
                        help="The directory that contains downloaded tar files")
    parser.add_argument('--target-dir', default=_TARGET_DIR,
                        help="The directory to store extracted images")
    parser.add_argument('--checksum', action='store_true',
                        help="If check integrity before extracting.")
    parser.add_argument('--with-rec', action='store_true',
                        help="If build image record files.")
    parser.add_argument('--num-thread', type=int, default=1,
                        help="Number of threads to use when building image record file.")
    return parser.parse_args()
Example #22
Source File: pascal_voc.py From MXNet-Deep-Learning-in-Action with Apache License 2.0 | 5 votes |
def parse_args():
    """Parse command-line options for preparing the Pascal VOC dataset.

    Returns
    -------
    argparse.Namespace
        ``download_dir`` (str), ``no_download`` (bool), ``overwrite`` (bool).
    """
    parser = argparse.ArgumentParser(
        description='Initialize PASCAL VOC dataset.',
        epilog='Example: python pascal_voc.py --download-dir ~/VOCdevkit',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', type=str, default='~/VOCdevkit/',
                        help='dataset directory on disk')
    parser.add_argument('--no-download', action='store_true',
                        help='disable automatic download if set')
    # fixed typo in user-facing help text: "corrputed" -> "corrupted"
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite downloaded files if set, in case they are corrupted')
    args = parser.parse_args()
    return args

#####################################################################################
# Download and extract VOC datasets into ``path``
Example #23
Source File: prepare_visualgenome.py From dgl with Apache License 2.0 | 5 votes |
def parse_args():
    """Parse command-line options for preparing the Visual Genome dataset."""
    parser = argparse.ArgumentParser(
        description='Initialize Visual Genome dataset.',
        epilog='Example: python visualgenome.py --download-dir ~/visualgenome',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', type=str, default='~/visualgenome/',
                        help='dataset directory on disk')
    parser.add_argument('--no-download', action='store_true',
                        help='disable automatic download if set')
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite downloaded files if set, in case they are corrupted')
    return parser.parse_args()
Example #24
Source File: imagenet.py From panoptic-fpn-gluon with Apache License 2.0 | 5 votes |
def build_rec_process(img_dir, train=False, num_thread=1):
    """Build an ImageRecord (.rec) file for the train or val split.

    Downloads the im2rec.py tool and the matching .lst file into a sibling
    ``rec`` directory, runs im2rec over ``img_dir`` in a subprocess, then
    removes the temporary script and list files.

    Parameters
    ----------
    img_dir : str
        Directory holding the extracted images.
    train : bool
        Build the 'train' split when True, otherwise 'val'.
    num_thread : int
        Number of worker threads passed through to im2rec.
    """
    rec_dir = os.path.abspath(os.path.join(img_dir, '../rec'))
    makedirs(rec_dir)
    prefix = 'train' if train else 'val'
    print('Building ImageRecord file for ' + prefix + ' ...')
    # removed unused local `to_path` (was an alias of rec_dir, never read)
    # download lst file and im2rec script
    script_path = os.path.join(rec_dir, 'im2rec.py')
    script_url = 'https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py'
    download(script_url, script_path)
    lst_path = os.path.join(rec_dir, prefix + '.lst')
    lst_url = 'http://data.mxnet.io/models/imagenet/resnet/' + prefix + '.lst'
    download(lst_url, lst_path)
    # execution
    import sys
    cmd = [
        sys.executable,
        script_path,
        rec_dir,
        img_dir,
        '--recursive',
        '--pass-through',
        '--pack-label',
        '--num-thread',
        str(num_thread)
    ]
    subprocess.call(cmd)
    # clean up the temporary helper files
    os.remove(script_path)
    os.remove(lst_path)
    print('ImageRecord file for ' + prefix + ' has been built!')
Example #25
Source File: download_dataset.py From panoptic-fpn-gluon with Apache License 2.0 | 5 votes |
def download_data(path, file, overwrite=False):
    """Download one DIV2K archive named ``file`` and extract it into ``path``.

    Parameters
    ----------
    path : str
        Destination directory for the archive and its contents.
    file : str
        Archive filename relative to the DIV2K base URL.
    overwrite : bool
        Re-download even if the archive already exists.
    """
    _DOWNLOAD_URL = 'https://data.vision.ee.ethz.ch/cvl/DIV2K/'
    filename = download(_DOWNLOAD_URL + file, path=path, overwrite=overwrite)
    # extract; renamed the context variable so it no longer shadows builtin `zip`
    with zipfile.ZipFile(filename, 'r') as zf:
        zf.extractall(path=path)
Example #26
Source File: download_dataset.py From panoptic-fpn-gluon with Apache License 2.0 | 5 votes |
def parse_args():
    """Parse command-line options for downloading a CycleGAN dataset."""
    parser = argparse.ArgumentParser(
        description='Initialize Cycle Gan dataset.',
        epilog='Example: python download_dataset.py --download-dir ./',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', type=str, default='./',
                        help='dataset directory on disk')
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite downloaded files if set, in case they are corrupted')
    parser.add_argument('--file', type=str, default='horse2zebra',
                        choices=['apple2orange', 'summer2winter_yosemite', 'horse2zebra',
                                 'monet2photo', 'cezanne2photo', 'ukiyoe2photo',
                                 'vangogh2photo', 'maps', 'cityscapes', 'facades',
                                 'iphone2dslr_flower', 'ae_photos'],
                        help='Available datasets are: apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, cezanne2photo, ukiyoe2photo, vangogh2photo, maps, cityscapes, facades, iphone2dslr_flower, ae_photos')
    return parser.parse_args()

#####################################################################################
# Download and extract the chosen CycleGAN dataset into ``path``
Example #27
Source File: download_dataset.py From panoptic-fpn-gluon with Apache License 2.0 | 5 votes |
def download_data(path, file, overwrite=False):
    """Download one CycleGAN dataset archive named ``file`` and extract it into ``path``.

    Parameters
    ----------
    path : str
        Destination directory for the archive and its contents.
    file : str
        Archive filename relative to the CycleGAN datasets base URL.
    overwrite : bool
        Re-download even if the archive already exists.
    """
    _DOWNLOAD_URL = 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/'
    filename = download(_DOWNLOAD_URL + file, path=path, overwrite=overwrite)
    # extract; renamed the context variable so it no longer shadows builtin `zip`
    with zipfile.ZipFile(filename, 'r') as zf:
        zf.extractall(path=path)
Example #28
Source File: prepare_coco.py From mxnet-centernet with MIT License | 5 votes |
def parse_args():
    """Parse command-line options for preparing the MS COCO dataset."""
    parser = argparse.ArgumentParser(
        description='Initialize MS COCO dataset.',
        epilog='Example: python mscoco.py --download-dir ~/mscoco',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', type=str, default='~/mscoco/',
                        help='dataset directory on disk')
    parser.add_argument('--no-download', action='store_true',
                        help='disable automatic download if set')
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite downloaded files if set, in case they are corrupted')
    return parser.parse_args()
Example #29
Source File: imagenet.py From cascade_rcnn_gluon with Apache License 2.0 | 5 votes |
def build_rec_process(img_dir, train=False, num_thread=1):
    """Build an ImageRecord (.rec) file for the train or val split.

    Downloads the im2rec.py tool and the matching .lst file into a sibling
    ``rec`` directory, runs im2rec over ``img_dir`` in a subprocess, then
    removes the temporary script and list files.

    Parameters
    ----------
    img_dir : str
        Directory holding the extracted images.
    train : bool
        Build the 'train' split when True, otherwise 'val'.
    num_thread : int
        Number of worker threads passed through to im2rec.
    """
    rec_dir = os.path.abspath(os.path.join(img_dir, '../rec'))
    makedirs(rec_dir)
    prefix = 'train' if train else 'val'
    print('Building ImageRecord file for ' + prefix + ' ...')
    # removed unused local `to_path` (was an alias of rec_dir, never read)
    # download lst file and im2rec script
    script_path = os.path.join(rec_dir, 'im2rec.py')
    script_url = 'https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py'
    download(script_url, script_path)
    lst_path = os.path.join(rec_dir, prefix + '.lst')
    lst_url = 'http://data.mxnet.io/models/imagenet/resnet/' + prefix + '.lst'
    download(lst_url, lst_path)
    # execution
    import sys
    cmd = [
        sys.executable,
        script_path,
        rec_dir,
        img_dir,
        '--recursive',
        '--pass-through',
        '--pack-label',
        '--num-thread',
        str(num_thread)
    ]
    subprocess.call(cmd)
    # clean up the temporary helper files
    os.remove(script_path)
    os.remove(lst_path)
    print('ImageRecord file for ' + prefix + ' has been built!')
Example #30
Source File: ade20k.py From cascade_rcnn_gluon with Apache License 2.0 | 5 votes |
def download_ade(path, overwrite=False):
    """Download the ADE20K challenge archives and extract them into ``path``."""
    _AUG_DOWNLOAD_URLS = [
        ('http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip',
         '219e1696abb36c8ba3a3afe7fb2f4b4606a897c7'),
        ('http://data.csail.mit.edu/places/ADEchallenge/release_test.zip',
         'e05747892219d10e9243933371a497e905a4860c'),
    ]
    # archives are kept in a 'downloads' subfolder, contents go to ``path``
    download_dir = os.path.join(path, 'downloads')
    makedirs(download_dir)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        archive = download(url, path=download_dir, overwrite=overwrite,
                           sha1_hash=checksum)
        # extract
        with zipfile.ZipFile(archive, "r") as zip_ref:
            zip_ref.extractall(path=path)