Python clint.textui.progress.bar() Examples

The following are 30 code examples of clint.textui.progress.bar(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module clint.textui.progress, or try the search function.
Example #1
Source File: export_whitebalance_clicks.py    From opensurfaces with MIT License 6 votes vote down vote up
def handle(self, *args, **options):
        """
        Export the normalized RGB values under every white-balance click on
        photos whose label agrees with the photo's whitebalanced flag, saving
        them as a numpy array (args[0] or 'whitebalance_clicks.npy').
        """
        labels = PhotoWhitebalanceLabel.objects.filter(
            whitebalanced=F('photo__whitebalanced')
        )

        samples = []
        for label in progress.bar(labels):
            # Re-fetch the photo through its concrete class.
            photo = label.photo.__class__.objects.get(id=label.photo.id)
            img = open_image(photo.image_300)
            coords = label.points.split(',')
            width, height = img.size[0], img.size[1]
            for i in xrange(label.num_points):
                # Points are stored as normalized (x, y) pairs.
                px = float(coords[i * 2]) * width
                py = float(coords[i * 2 + 1]) * height
                pixel = img.getpixel((px, py))
                samples.append([c / 255.0 for c in pixel])

        np.save(args[0] if args else 'whitebalance_clicks.npy', np.array(samples))
Example #2
Source File: setup.py    From Seq2seqChatbots with MIT License 6 votes vote down vote up
def download_data(url, zipped_path, extract):
  """
  Download a zip archive from *url* to *zipped_path* with a progress bar,
  then extract its contents into the *extract* directory.

  Params:
    :url:          Direct download URL for a zip file.
    :zipped_path:  Destination path for the downloaded archive.
    :extract:      Directory the archive is extracted into.
  """
  # Open the url and download the data with progress bars.
  data_stream = requests.get(url, stream=True)

  with open(zipped_path, 'wb') as file:
    # Content-Length may be absent (e.g. chunked transfer); default to 0
    # instead of crashing on int(None).
    total_length = int(data_stream.headers.get('content-length', 0))
    # Integer chunk count for the bar (original produced a float in py3).
    for chunk in progress.bar(data_stream.iter_content(chunk_size=1024),
                              expected_size=total_length // 1024 + 1):
      if chunk:  # filter out keep-alive chunks
        file.write(chunk)
        file.flush()

  # Extract file; the context manager closes the archive even on error.
  with zipfile.ZipFile(zipped_path, 'r') as zip_file:
    zip_file.extractall(extract)
Example #3
Source File: client.py    From uspto-opendata-python with MIT License 6 votes vote down vote up
def download_package(self, query_id, format, progressbar=False):
        """
        Download the result package for *query_id* in the given *format*.

        :param query_id: Identifier of a previously submitted query.
        :param format: Package format token interpolated into the URL.
        :param progressbar: When True, stream the response and render a
            progress bar; when False, download eagerly.
        :return: The zip package payload as bytes.
        :raises AssertionError: If the HTTP status is not 200/302 or the
            response Content-Type is not ``application/zip``.
        """
        url = self.PACKAGE_DOWNLOAD_URL.format(query_id=query_id, format=format)
        logger.info('Downloading package for queryId=%s with format=%s. url=%s', query_id, format, url)
        # Streaming is only needed when we want to show download progress.
        response = self.session.get(url, stream=progressbar)
        assert response.status_code in [200, 302], 'No download package. status={}'.format(response.status_code)
        assert response.headers['Content-Type'] == 'application/zip'
        if not progressbar:
            return response.content
        else:
            # https://stackoverflow.com/questions/15644964/python-progress-bar-and-downloads/20943461#20943461
            # NOTE(review): int() will raise if Content-Length is missing —
            # acceptable here since the asserts above already require a
            # well-formed zip response; verify against the API contract.
            total_length = int(response.headers.get('Content-Length'))
            buffer = BytesIO()
            for chunk in progress.bar(response.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1, filled_char='='):
                if chunk:
                    buffer.write(chunk)

            buffer.seek(0)
            return buffer.read()
Example #4
Source File: update_majority_names.py    From opensurfaces with MIT License 6 votes vote down vote up
def handle(self, *args, **options):
        """
        Promote substance and name labels that have reached the experiment's
        consensus threshold onto their material shapes.
        """
        # Substance labels: each pass runs inside one transaction so shapes
        # are updated atomically.
        with transaction.atomic():
            for label in progress.bar(ShapeSubstanceLabel.objects.all()):
                new_hit_settings = label.mturk_assignment.hit.hit_type.experiment.new_hit_settings
                shape = label.shape
                substance = label.substance
                # Apply the label only once enough workers agree on it.
                if (shape.substances.filter(substance=substance).count() >=
                        new_hit_settings.min_assignment_consensus):
                    shape.substance = substance
                    # Recompute entropy from the labels; persist via save().
                    shape.update_entropy(save=False)
                    shape.save()

        # Name labels: same consensus rule, applied to shape names.
        with transaction.atomic():
            for label in progress.bar(MaterialShapeNameLabel.objects.all()):
                new_hit_settings = label.mturk_assignment.hit.hit_type.experiment.new_hit_settings
                shape = label.shape
                name = label.name
                if (shape.names.filter(name=name).count() >=
                        new_hit_settings.min_assignment_consensus):
                    shape.name = name
                    shape.update_entropy(save=False)
                    shape.save()
Example #5
Source File: generate.py    From ety-python with MIT License 6 votes vote down vote up
def download_dataset(url, dl_path):
    """
    Download filtered etymwn from jmsv.me mirror, displaying progress bar.

    Args:
        url: Direct download URL for the dataset.
        dl_path: Destination file path on disk.
    """
    r = requests.get(url, stream=True)

    with open(dl_path, "wb") as f:
        # Content-Length may be absent; default to 0 instead of crashing
        # on int(None).
        total_length = int(r.headers.get("content-length", 0))
        chunk_size = 4096
        for chunk in progress.bar(
            r.iter_content(chunk_size=chunk_size),
            # Integer chunk count (original produced a float in py3).
            expected_size=(total_length // chunk_size) + 1,
        ):
            if chunk:  # skip keep-alive chunks
                f.write(chunk)
                f.flush()
    print("Downloaded to " + dl_path)
Example #6
Source File: getsploit.py    From getsploit with GNU Lesser General Public License v3.0 6 votes vote down vote up
def downloadGetsploitDb(self, full_path):
        """
        Download the getsploit database archive to *full_path*, extract it
        into DBPATH, and delete the archive.

        :param full_path: Temporary path for the downloaded zip archive.
        :return: True on completion.
        """
        print("Downloading getsploit database archive. Please wait, it may take time. Usually around 5-10 minutes.")
        # {'apiKey':self._Vulners__api_key}
        # NOTE: accesses the name-mangled opener of the Vulners base class.
        download_request = self._Vulners__opener.get(self.vulners_urls['searchsploitdb'], stream = True)
        with open(full_path, 'wb') as f:
            # Assumes the server always sends Content-Length; int(None)
            # would raise otherwise.
            total_length = int(download_request.headers.get('content-length'))
            for chunk in progress.bar(download_request.iter_content(chunk_size=1024), expected_size=(total_length / 1024) + 1):
                if chunk:
                    f.write(chunk)
                    f.flush()
        print("\nUnpacking database.")
        zip_ref = zipfile.ZipFile(full_path, 'r')
        zip_ref.extractall(DBPATH)
        zip_ref.close()
        # The extracted database replaces the archive, which is removed.
        os.remove(full_path)
        return True
Example #7
Source File: matrix.py    From textplot with MIT License 6 votes vote down vote up
def index(self, text, terms=None, **kwargs):

        """
        Index all term pair distances.

        Args:
            text (Text): The source text.
            terms (list): Terms to index.
        """

        self.clear()

        # Fall back to the full vocabulary when no term list is given.
        vocab = terms or text.terms.keys()

        total = comb(len(vocab), 2)

        # Score every unordered term pair with the Bray-Curtis distance.
        for w1, w2 in bar(combinations(vocab, 2), expected_size=total, every=1000):
            self.set_pair(w1, w2, text.score_braycurtis(w1, w2, **kwargs))
Example #8
Source File: opensubtitles_chatbot.py    From Seq2seqChatbots with MIT License 6 votes vote down vote up
def download_data(self, train_mode):
    '''
    Download the zipped dataset from self._url to self._zipped_data with a
    progress bar, then extract it.

    Params:
      :train_mode:  Whether we are in train or dev mode.
    '''

    # Open the url and download the data with progress bars.
    data_stream = requests.get(self._url, stream=True)
    with open(self._zipped_data, 'wb') as file:
      # Assumes the server sends Content-Length; int(None) would raise.
      total_length = int(data_stream.headers.get('content-length'))
      for chunk in progress.bar(data_stream.iter_content(chunk_size=1024),
                                expected_size=total_length / 1024 + 1):
        if chunk:
          file.write(chunk)
          file.flush()

    # Next step is extracting the data.
    print('t2t_csaky_log: Extracting data to ' + self._zipped_data + '.')
    self.extract_data(train_mode)

  # Extract data and go to the next step. 
Example #9
Source File: retriangulate_material_shapes.py    From opensurfaces with MIT License 6 votes vote down vote up
def handle(self, *args, **options):
        """
        Delete ALL MaterialShape instances (and their cached spec files),
        then schedule every photo for re-triangulation.  Destructive:
        prompts twice before proceeding.
        """
        print ('This will delete all material shapes, '
               'thereby deleting all quality votes and '
               'any derivative items')

        # Double confirmation: this wipes derived data irreversibly.
        if raw_input("Are you sure? [y/n] ").lower() != "y":
            print 'Exiting'
            return

        if raw_input("Is it backed up? [y/n] ").lower() != "y":
            print 'Exiting'
            return

        # delete existing shapes
        # Remove cached image-spec files first so no orphan files remain.
        for shape in progress.bar(MaterialShape.objects.all()):
            for f in shape._ik.spec_files:
                f.delete()
            shape.delete()

        # schedule new ones to be computed
        # Queued asynchronously via .delay() on the task.
        for photo in progress.bar(Photo.objects.all()):
            retriangulate_material_shapes_task.delay(photo)
Example #10
Source File: update_majority_names.py    From opensurfaces with MIT License 6 votes vote down vote up
def handle(self, *args, **options):
        """
        Promote substance and name labels that have reached the experiment's
        consensus threshold onto their material shapes.
        """
        # Substance labels: each pass runs inside one transaction so shapes
        # are updated atomically.
        with transaction.atomic():
            for label in progress.bar(ShapeSubstanceLabel.objects.all()):
                new_hit_settings = label.mturk_assignment.hit.hit_type.experiment.new_hit_settings
                shape = label.shape
                substance = label.substance
                # Apply the label only once enough workers agree on it.
                if (shape.substances.filter(substance=substance).count() >=
                        new_hit_settings.min_assignment_consensus):
                    shape.substance = substance
                    # Recompute entropy from the labels; persist via save().
                    shape.update_entropy(save=False)
                    shape.save()

        # Name labels: same consensus rule, applied to shape names.
        with transaction.atomic():
            for label in progress.bar(MaterialShapeNameLabel.objects.all()):
                new_hit_settings = label.mturk_assignment.hit.hit_type.experiment.new_hit_settings
                shape = label.shape
                name = label.name
                if (shape.names.filter(name=name).count() >=
                        new_hit_settings.min_assignment_consensus):
                    shape.name = name
                    shape.update_entropy(save=False)
                    shape.save()
Example #11
Source File: osp_graph.py    From open-syllabus-project with Apache License 2.0 6 votes vote down vote up
def add_nodes(self):

        """
        Register displayed texts.
        """

        # One graph node per ranked text, keyed by the text id.
        for ranked in progress.bar(Text_Index.rank_texts()):

            text = ranked['text']

            self.graph.add_node(text.id, {
                'label':  text.pretty('title'),
                'author': text.pretty('surname'),
                'count':  text.count,
                'score':  ranked['score'],
            })
Example #12
Source File: transfer_all_images.py    From opensurfaces with MIT License 6 votes vote down vote up
def handle(self, *args, **options):
        """
        Transfer all image files (and their thumbnails) for the listed apps
        to the default storage backend, when it supports ``transfer``.
        """
        storage = DefaultStorage()

        # NOTE(review): 'shapes' appears twice in this list -- possibly a
        # typo for a third app; its models are visited twice as a result.
        for model in _get_models(['shapes', 'photos', 'shapes']):
            has_images = False

            # transfer image fields
            for f in model._meta.fields:
                if isinstance(f, models.ImageField):
                    has_images = True
                    if hasattr(storage, 'transfer'):
                        filenames = model.objects.all() \
                            .values_list(f.name, flat=True)
                        print '%s: %s' % (model, f)
                        # Only transfer files that exist locally.
                        for filename in progress.bar(filenames):
                            if filename and storage.local.exists(filename):
                                storage.transfer(filename)

            # transfer thumbs
            if has_images:
                print '%s: thumbnails' % model
                ids = model.objects.all().values_list('id', flat=True)
                ct_id = ContentType.objects.get_for_model(model).id
                # Thumbnails are regenerated/transferred asynchronously.
                for id in progress.bar(ids):
                    ensure_thumbs_exist_task.delay(ct_id, id)
Example #13
Source File: transfer_all_images.py    From opensurfaces with MIT License 6 votes vote down vote up
def handle(self, *args, **options):
        """
        Transfer all image files (and their thumbnails) for the listed apps
        to the default storage backend, when it supports ``transfer``.
        """
        storage = DefaultStorage()

        # NOTE(review): 'shapes' appears twice in this list -- possibly a
        # typo for a third app; its models are visited twice as a result.
        for model in _get_models(['shapes', 'photos', 'shapes']):
            has_images = False

            # transfer image fields
            for f in model._meta.fields:
                if isinstance(f, models.ImageField):
                    has_images = True
                    if hasattr(storage, 'transfer'):
                        filenames = model.objects.all() \
                            .values_list(f.name, flat=True)
                        print '%s: %s' % (model, f)
                        # Only transfer files that exist locally.
                        for filename in progress.bar(filenames):
                            if filename and storage.local.exists(filename):
                                storage.transfer(filename)

            # transfer thumbs
            if has_images:
                print '%s: thumbnails' % model
                ids = model.objects.all().values_list('id', flat=True)
                ct_id = ContentType.objects.get_for_model(model).id
                # Thumbnails are regenerated/transferred asynchronously.
                for id in progress.bar(ids):
                    ensure_thumbs_exist_task.delay(ct_id, id)
Example #14
Source File: utils.py    From opensurfaces with MIT License 6 votes vote down vote up
def chunk_list_generator(lst, chunksize):
    """ Generator that chunks a list ``lst`` into sublists of size ``chunksize``.

    Args:
        lst: The sequence to chunk; falsy values (``None``, ``[]``) yield
            nothing.
        chunksize: Target chunk size; values below 1 are clamped to 1.

    Yields:
        Consecutive slices of ``lst`` of length ``chunksize`` (the final
        slice may be shorter).
    """
    if lst:
        chunksize = max(chunksize, 1)
        # ``range`` (not py2-only ``xrange``) keeps this helper working on
        # both Python 2 and 3 with identical iteration order.
        for start in range(0, len(lst), chunksize):
            yield lst[start:start + chunksize]


#def queryset_batch_delete(queryset, batch_size=100000, show_progress=False):
    #""" Delete a large queryset, batching into smaller queries (sometimes huge
    #commands crash) """
    #if show_progress:
        #print 'queryset_batch_delete: fetching ids for %s' % queryset.model
    #ids = queryset.values_list('pk', flat=True)
    #if len(ids) <= batch_size:
        #queryset.delete()
    #else:
        #iterator = range(0, len(ids), batch_size)
        #if show_progress:
            #progress.bar(iterator)
        #for i in iterator:
            #queryset.filter(pk__in=ids[i:i+batch_size]).delete()
    #return len(ids) 
Example #15
Source File: retriangulate_material_shapes.py    From opensurfaces with MIT License 6 votes vote down vote up
def handle(self, *args, **options):
        """
        Delete ALL MaterialShape instances (and their cached spec files),
        then schedule every photo for re-triangulation.  Destructive:
        prompts twice before proceeding.
        """
        print ('This will delete all material shapes, '
               'thereby deleting all quality votes and '
               'any derivative items')

        # Double confirmation: this wipes derived data irreversibly.
        if raw_input("Are you sure? [y/n] ").lower() != "y":
            print 'Exiting'
            return

        if raw_input("Is it backed up? [y/n] ").lower() != "y":
            print 'Exiting'
            return

        # delete existing shapes
        # Remove cached image-spec files first so no orphan files remain.
        for shape in progress.bar(MaterialShape.objects.all()):
            for f in shape._ik.spec_files:
                f.delete()
            shape.delete()

        # schedule new ones to be computed
        # Queued asynchronously via .delay() on the task.
        for photo in progress.bar(Photo.objects.all()):
            retriangulate_material_shapes_task.delay(photo)
Example #16
Source File: export_whitebalance_clicks.py    From opensurfaces with MIT License 6 votes vote down vote up
def handle(self, *args, **options):
        """
        Export the normalized RGB values under every white-balance click on
        photos whose label agrees with the photo's whitebalanced flag, saving
        them as a numpy array (args[0] or 'whitebalance_clicks.npy').
        """
        labels = PhotoWhitebalanceLabel.objects.filter(
            whitebalanced=F('photo__whitebalanced')
        )

        samples = []
        for label in progress.bar(labels):
            # Re-fetch the photo through its concrete class.
            photo = label.photo.__class__.objects.get(id=label.photo.id)
            img = open_image(photo.image_300)
            coords = label.points.split(',')
            width, height = img.size[0], img.size[1]
            for i in xrange(label.num_points):
                # Points are stored as normalized (x, y) pairs.
                px = float(coords[i * 2]) * width
                py = float(coords[i * 2 + 1]) * height
                pixel = img.getpixel((px, py))
                samples.append([c / 255.0 for c in pixel])

        np.save(args[0] if args else 'whitebalance_clicks.npy', np.array(samples))
Example #17
Source File: utils.py    From open-syllabus-project with Apache License 2.0 6 votes vote down vote up
def query_bar(query):

    """
    Wrap a query in a progress bar.

    Args:
        query (peewee.Query): A query instance.

    Returns:
        The query, wrapped in a progress bar.
    """

    # Count first so the bar can display an accurate total.
    total = query.count()

    rows = ServerSide(query.naive())

    return progress.bar(rows, expected_size=total)
Example #18
Source File: metrics.py    From dialog-eval with MIT License 6 votes vote down vote up
def download_fasttext(self):
    """
    Download the zipped fastText wiki-news-300d-1M vectors into
    self.input_dir with a progress bar, then extract the archive there.
    """
    # Open the url and download the data with progress bars.
    data_stream = requests.get('https://dl.fbaipublicfiles.com/fasttext/' +
      'vectors-english/wiki-news-300d-1M.vec.zip', stream=True)
    zipped_path = os.path.join(self.input_dir, 'fasttext.zip')

    with open(zipped_path, 'wb') as file:
      # Content-Length may be absent; default to 0 instead of crashing
      # on int(None).
      total_length = int(data_stream.headers.get('content-length', 0))
      # Integer chunk count for the bar (original produced a float in py3).
      for chunk in progress.bar(data_stream.iter_content(chunk_size=1024),
                                expected_size=total_length // 1024 + 1):
        if chunk:  # filter out keep-alive chunks
          file.write(chunk)
          file.flush()

    # Extract file; the context manager closes the archive even on error.
    with zipfile.ZipFile(zipped_path, 'r') as zip_file:
      zip_file.extractall(self.input_dir)

  # Generate a vocab from data files. 
Example #19
Source File: auto_rectify.py    From opensurfaces with MIT License 5 votes vote down vote up
def handle(self, *args, **options):
        """
        Queue automatic rectification for correct, planar material shapes
        that have manual (non-automatic) rectified normals but no automatic
        one yet, largest (most vertices) first.
        """
        ids = MaterialShape.objects.filter(correct=True, planar=True) \
            .exclude(rectified_normal__automatic=True) \
            .filter(rectified_normals__automatic=False) \
            .annotate(c=Count('rectified_normals')) \
            .filter(c__gt=0) \
            .order_by('-num_vertices') \
            .values_list('id', flat=True)

        # Each shape is rectified asynchronously via .delay().
        for id in progress.bar(ids):
            auto_rectify_shape.delay(id)
Example #20
Source File: corpus.py    From open-syllabus-project with Apache License 2.0 5 votes vote down vote up
def syllabi_bar(self):

        """
        Wrap the syllabi iterator in a progress bar.

        Yields:
            Segment: The next syllabus.
        """

        # self.file_count gives the bar its expected total.
        total = self.file_count

        for doc in bar(self.syllabi(), expected_size=total):
            yield doc
Example #21
Source File: utils.py    From packtpub-crawler with MIT License 5 votes vote down vote up
def download_file(r, url, directory, filename, headers):
    """
    Downloads file with progress bar

    :param r: requests-like session used to perform the GET.
    :param url: URL of the file to download.
    :param directory: Destination directory (created if missing).
    :param filename: Destination file name (':' replaced with '-').
    :param headers: HTTP headers passed to the request.
    :return: Full path of the downloaded file.
    """
    if not os.path.exists(directory):
        # creates directories recursively
        os.makedirs(directory)
        log_info('[+] created new directory: ' + directory)

    # ':' is not a legal filename character on some filesystems.
    filename = filename.replace(':', '-')
    path = os.path.join(directory, filename)

    print '[-] downloading file from url: {0}'.format(url)
    response = r.get(url, headers=headers, stream=True)
    #log_dict(response.headers)
    # Missing Content-Length falls back to 0 rather than crashing.
    total_length = 0
    test_length = response.headers.get('content-length')
    if test_length is not None:
        total_length = int(test_length)

    with open(path, 'wb') as f:
        for chunk in progress.bar(response.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
            if chunk:
                f.write(chunk)
                f.flush()
    log_success('[+] new download: {0}'.format(path))
    return path
Example #22
Source File: text_index.py    From open-syllabus-project with Apache License 2.0 5 votes vote down vote up
def es_stream_docs(cls):

        """
        Stream Elasticsearch docs.

        Yields:
            dict: The next document.
        """

        # One document per ranked text, keyed by the text id.
        for ranked in progress.bar(cls.rank_texts()):

            text = ranked['text']

            yield {
                '_id':        text.id,
                'corpus':     text.corpus,
                'identifier': text.identifier,
                'url':        text.url,

                'authors':    text.pretty('authors'),
                'title':      text.pretty('title'),
                'publisher':  text.pretty('publisher'),
                'date':       text.pretty('date'),
                'journal':    text.pretty('journal_title'),

                'count':      text.count,
                'rank':       ranked['rank'],
                'score':      ranked['score'],
            }
Example #23
Source File: corpus.py    From open-syllabus-project with Apache License 2.0 5 votes vote down vote up
def segments_bar(self):

        """
        Wrap the segments iterator in a progress bar.

        Yields:
            Segment: The next segment.
        """

        # self.s2 is passed as the bar's expected total.
        wrapped = bar(self.segments(), expected_size=self.s2)

        for seg in wrapped:
            yield seg
Example #24
Source File: generate.py    From ety-python with MIT License 5 votes vote down vote up
def generate_json(source_path, dir):
    """
    Reads source tsv and restructures data as described:
    https://github.com/jmsv/ety-python/issues/24
    """
    result = {}

    print("Loading source tsv")
    with io.open(source_path, "r", newline="", encoding="utf-8") as source:
        source_rows = list(csv.reader(source, delimiter="\t"))

    gc.collect()

    print("Structuring data")
    for row in progress.bar(source_rows):
        src_lang, src_word = split_elements(row[0])
        dest_lang, dest_word = split_elements(row[2])

        # Nested structure: language -> word -> list of {dest_word: dest_lang}.
        result.setdefault(src_lang, {}).setdefault(src_word, []).append(
            {dest_word: dest_lang}
        )

        del src_lang, src_word, dest_lang, dest_word

    # Save data to seperate files for languages, may be required in the future
    # print('Saving language files')
    # for key in progress.bar(result):
    #     with io.open(os.path.join(dir, 'data/ety-%s.json' % key), 'w') as f:
    #         f.write(json.dumps(result[key], sort_keys=False))

    # Save data
    print("Writing etymologies file")
    with io.open(os.path.join(dir, "etymologies.json"), "w") as f:
        json.dump(result, f)
Example #25
Source File: utils.py    From opensurfaces with MIT License 5 votes vote down vote up
def queryset_progress_bar(queryset):
    """ Returns an iterator for a queryset that renders a progress bar with a
    countdown timer """
    total = queryset.count()
    if not total:
        # Nothing to iterate; skip rendering an empty bar.
        return []
    return progress.bar(queryset.iterator(), expected_size=total)
Example #26
Source File: phishfinder.py    From phishfinder with MIT License 5 votes vote down vote up
def download_file(download_url):
  """
  Download the kit at *download_url* into ``<outputDir>/kits``, logging the
  URL to ``kits.txt`` first.  A repeat of the immediately previous URL is
  skipped.
  """

  # make sure the URL we're downloading hasn't just been guessed
  global LASTURL

  if LASTURL == download_url:
    print(bcolors.WARNING + "[!]  Already downloaded {}".format(download_url) + bcolors.ENDC)
    return

  LASTURL = download_url

  # current date and time for logging
  now = datetime.now()
  date_time = now.strftime("%m%d%Y%H%M%S-")
  filename = date_time + download_url.split('/')[-1]

  # update the log file
  with safe_open_a(args.outputDir + "/kits.txt") as f:
    f.write(date_time + download_url + "\n")

  # download the kit
  try:
    q = requests.get(download_url, allow_redirects=False, timeout=5, stream=True)
    if q.ok:
      # Content-Length may be missing; fall back to 0 instead of letting
      # int(None) raise and abort the download.
      total_length = int(q.headers.get('content-length') or 0)
      sys.stdout.write('[+]  Saving file to {0}{1}{2}...'.format(args.outputDir + "/kits", "/", filename))
      with safe_open_w(args.outputDir + "/kits/" + filename) as kit:
        for chunk in progress.bar(q.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
          if chunk:
            kit.write(chunk)
            kit.flush()
        print(bcolors.OKGREEN + "saved." + bcolors.ENDC)
  except Exception:
    # Narrowed from a bare ``except:`` so Ctrl-C / SystemExit still abort.
    print(bcolors.WARNING + "[!]  An error occurred downloading the file at {}".format(download_url) + bcolors.ENDC)
    return
Example #27
Source File: detect_vanishing.py    From opensurfaces with MIT License 5 votes vote down vote up
def handle(self, *args, **options):

        """
        Queue vanishing-point detection for every photo that still lacks
        vanishing points, most confidently-categorized scenes first.
        """

        photo_ids = Photo.objects.filter(vanishing_points='') \
                .order_by('-scene_category_correct_score') \
                .values_list('id', flat=True)

        # Detection runs asynchronously, one task per photo.
        for photo_id in progress.bar(photo_ids):
            detect_vanishing_points.delay(photo_id)
Example #28
Source File: fix_scene_category.py    From opensurfaces with MIT License 5 votes vote down vote up
def handle(self, *args, **options):

        """
        Mark photos 1..36 as having a correct 'kitchen' scene category,
        filling in the category where it is missing.
        """

        admin_user = User.objects.get_or_create(
            username='admin')[0].get_profile()
        kitchen, _ = PhotoSceneCategory.objects \
                .get_or_create(name='kitchen')

        for photo_id in progress.bar(xrange(1, 37)):
            photo = Photo.objects.get(id=photo_id)
            # Only fill in the category when it is not already set.
            if not photo.scene_category:
                photo.scene_category = kitchen
            photo.scene_category_correct = True
            photo.save()
Example #29
Source File: scraper.py    From pluralsight_scrapper with The Unlicense 5 votes vote down vote up
def download(self,url,path):
        """
        Stream *url* to *path* on disk, rendering a progress bar.

        :param url: Direct download URL.
        :param path: Destination file path.
        """
        r = requests.get(url, stream=True)
        with open(path, 'wb') as f:
            # Content-Length may be absent; default to 0 instead of
            # crashing on int(None).
            total_length = int(r.headers.get('content-length') or 0)
            for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)
                    f.flush()
Example #30
Source File: ipsw_dl.py    From autodecrypt with MIT License 5 votes vote down vote up
def dl(url, filename, sizeofile=0):
    """download IPSW file

    Args:
        url: Direct download URL.
        filename: Destination path on disk.
        sizeofile: Expected size in bytes; when non-zero the file is
            streamed in 1 KiB chunks with a progress bar, otherwise it is
            fetched in one shot.
    """
    if sizeofile == 0:
        # One-shot download.  The response is used as a context manager so
        # the connection is closed (the original leaked the handle).
        with urlopen(url) as dl_file:
            with open(filename, 'wb') as output:
                output.write(dl_file.read())
    else:
        dl_file = requests.get(url, stream=True)
        with open(filename, 'wb') as output:
            for chunk in progress.bar(dl_file.iter_content(chunk_size=1024),
                                      expected_size=(sizeofile / 1024) + 1):
                if chunk:  # skip keep-alive chunks
                    output.write(chunk)
                    output.flush()