Python pyprind.ProgBar() Examples
The following are 23 code examples of pyprind.ProgBar(), collected from open-source projects. Each example is attributed to its original project and source file. You may also want to check out the other available functions and classes of the pyprind module.
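All of the examples below follow the same basic pattern: construct a ProgBar with the expected number of iterations (optionally passing width, title, bar_char, stream, track_time, or monitor), then call update() once per completed step, optionally with item_id or force_flush. Here is a minimal, self-contained sketch of that pattern; the iteration count and the time.sleep() call are placeholders standing in for real work.

import time
import pyprind

n = 50  # placeholder iteration count for illustration
bar = pyprind.ProgBar(n, width=40, title='Demo', stream=1)  # stream=1 writes to stdout, stream=2 to stderr
for i in range(n):
    time.sleep(0.01)                   # stands in for real per-item work
    bar.update(item_id='item_%d' % i)  # advance the bar by one step and display the current item
print(bar)                             # prints the time-tracking summary (title, start/finish, total time elapsed)

The project examples below embed this same pattern in real workloads: feature-extraction loops, concurrent download and validation jobs, Django management commands, and the library's own unit tests.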
Example #1
Source File: compute_features.py From omgh with MIT License

def main(sname, iteration, cropped, full, flipped, force, dataset, storage_name):
    new_name = '%s-%d' % (sname, iteration)
    if dataset == 'segmented':
        cub = CUB_200_2011_Segmented(settings.CUB_ROOT, full=full)
    elif dataset == 'part-head':
        cub = CUB_200_2011_Parts_Head(settings.CUB_ROOT, full=full)
    elif dataset == 'part-body':
        cub = CUB_200_2011_Parts_Body(settings.CUB_ROOT, full=full)
    elif dataset == 'part-head-rf-new':
        cub = CUB_200_2011(settings.CUB_ROOT, 'images_head_rf_new')
    elif dataset == 'part-body-rf-new':
        cub = CUB_200_2011(settings.CUB_ROOT, 'images_body_rf_new')
    else:
        cub = CUB_200_2011(settings.CUB_ROOT, images_folder_name=dataset, full=full)

    if not storage_name:
        ft_storage = datastore(settings.storage(new_name))
    else:
        ft_storage = datastore(settings.storage(storage_name))

    ft_extractor = CNN_Features_CAFFE_REFERENCE(ft_storage,
                                                model_file=settings.model(new_name),
                                                pretrained_file=settings.pretrained(new_name),
                                                full=full, crop_index=0)

    number_of_images_in_dataset = sum(1 for _ in cub.get_all_images())
    bar = pyprind.ProgBar(number_of_images_in_dataset, width=80)

    for t, des in ft_extractor.extract_all(cub.get_all_images(), flip=flipped, crop=cropped,
                                           bbox=cub.get_bbox(), force=force):
        bar.update()

    print 'DONE'
Example #2
Source File: portfolio.py From PyTrendFollow with MIT License

def validate(self):
    """
    Runs Instrument.validate for every Instrument in the Portfolio and returns a DataFrame.
    Used for trading.
    """
    import concurrent.futures
    bar = pyprind.ProgBar(len(self.instruments.values()), title='Validating instruments')
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        dl = {executor.submit(x.validate): x.name for x in self.instruments.values()}
        d = {}
        for fut in concurrent.futures.as_completed(dl):
            bar.update(item_id=dl[fut])
            d[dl[fut]] = fut.result()
    d = pd.DataFrame(d).transpose()
    # blacklist instruments that didn't pass validation
    self.inst_blacklist = d[d['is_valid'] == False].index.tolist()
    return d
Example #3
Source File: smilite.py From smilite with GNU General Public License v3.0

def generate_zincid_smile_csv(zincid_list, out_file,
                              print_progress_bar=True,
                              backend='zinc12'):
    """
    Generates a CSV file of ZINC_ID,SMILE_string entries by querying
    the ZINC online database.

    Keyword arguments:
        zincid_list (str): Path to a UTF-8 or ASCII formatted file
            that contains 1 ZINC_ID per row. E.g.,
            ZINC0000123456
            ZINC0000234567
            [...]
        out_file (str): Path to a new output CSV file that will be written.
        print_prgress_bar (bool): Prints a progress bar to the screen if True.

    """
    id_smile_pairs = []
    with open(zincid_list, 'r') as infile:
        all_lines = infile.readlines()
        if print_progress_bar:
            pbar = pyprind.ProgBar(len(all_lines), title='Downloading SMILES')
        for line in all_lines:
            line = line.strip()
            id_smile_pairs.append((line, get_zinc_smile(line, backend=backend)))
            if print_progress_bar:
                pbar.update()
    with open(out_file, 'w') as out:
        for p in id_smile_pairs:
            out.write('{},{}\n'.format(p[0], p[1]))
Example #4
Source File: compute_berkeley_features.py From omgh with MIT License

def main(force):
    cub_head = CUB_200_2011(settings.CUB_ROOT, images_folder_name='images_b_head')
    cub_body = CUB_200_2011(settings.CUB_ROOT, images_folder_name='images_b_body')
    cub_crop = CUB_200_2011(settings.CUB_ROOT, images_folder_name='images_b_cropped')

    st_head = datastore(settings.storage('bmbh'))
    st_body = datastore(settings.storage('bmbb'))
    st_crop = datastore(settings.storage('bmbcflp'))

    ext_head = Berkeley_Extractor(st_head, pretrained_file=settings.BERKELEY_HEAD_PRET)
    ext_body = Berkeley_Extractor(st_body, pretrained_file=settings.BERKELEY_BODY_PRET)
    ext_crop = Berkeley_Extractor(st_crop, pretrained_file=settings.BERKELEY_CROP_PRET)

    number_of_images_in_dataset = sum(1 for _ in cub_crop.get_all_images())

    bar = pyprind.ProgBar(number_of_images_in_dataset, width=80)
    for t, des in ext_crop.extract_all(cub_crop.get_all_images(), flip=True, force=force):
        bar.update()
    print 'DONE CROP'

    bar = pyprind.ProgBar(number_of_images_in_dataset, width=80)
    for t, des in ext_head.extract_all(cub_head.get_all_images(), force=force):
        bar.update()
    print 'DONE HEAD'

    bar = pyprind.ProgBar(number_of_images_in_dataset, width=80)
    for t, des in ext_body.extract_all(cub_body.get_all_images(), force=force):
        bar.update()
    print 'DONE BODY'
Example #5
Source File: prog_bar.py From rlpyt with MIT License

def __init__(self, total_count):
    self.total_count = total_count
    self.max_progress = 1000000
    self.cur_progress = 0
    self.cur_count = 0
    if not logger.get_log_tabular_only():
        self.pbar = pyprind.ProgBar(self.max_progress)
    else:
        self.pbar = None
Example #6
Source File: dataset_loader.py From semi-adversarial-networks with MIT License

def preprocess(self):
    image_files = set(os.listdir(self.image_path))
    protoSM_files = set(os.listdir(self.proto_smG_path))
    protoOP_files = set(os.listdir(self.proto_opG_path))

    existing_files = image_files & protoSM_files & protoOP_files
    print('existing_files : ', len(existing_files))

    self.train_filenames = []
    self.train_labels = []
    self.test_filenames = []
    self.test_labels = []

    #df = self.df.sample(frac=1).reset_index(drop=True)
    pbar = pyprind.ProgBar(len(self.df))
    for row in self.df.iterrows():
        filename = row[1]['index']
        gender = row[1]['Male']

        if self.mode == 'train':
            if filename in existing_files:
                self.train_filenames.append(filename)
                self.train_labels.append(gender)
        elif self.mode == 'test':
            if filename in existing_files:
                self.test_filenames.append(filename)
                self.test_labels.append(gender)
        pbar.update()
    sys.stderr.flush()
Example #7
Source File: anonymize_data.py From django-GDPR with MIT License

def _anonymize_by_obj(self, obj_anonymizer, qs):
    bar = pyprind.ProgBar(
        qs.count(),
        title='Anonymize model {}'.format(self._get_full_model_name(qs.model)),
        stream=ProgressBarStream(self.stdout)
    )
    for obj in chunked_iterator(qs, obj_anonymizer.chunk_size):
        obj_anonymizer().anonymize_obj(obj)
        bar.update()
Example #8
Source File: anonymize_data.py From django-GDPR with MIT License

def _anonymize_by_qs(self, obj_anonymizer, qs):
    bar = pyprind.ProgBar(
        max(math.ceil(qs.count() // obj_anonymizer.chunk_size), 1),
        title='Anonymize model {}'.format(self._get_full_model_name(qs.model)),
        stream=ProgressBarStream(self.stdout)
    )
    for batch_qs in chunked_queryset_iterator(qs, obj_anonymizer.chunk_size,
                                              delete_qs=isinstance(obj_anonymizer,
                                                                   DeleteModelAnonymizer)):
        obj_anonymizer().anonymize_qs(batch_qs)
        bar.update()
Example #9
Source File: download.py From PyTrendFollow with MIT License

def download_all(prov_name, qtype, recent, concurrent):
    if qtype == QuotesType.futures:
        instruments = Instrument.load(config.portfolios.p_all)
        dl_fn = partial(dl_inst, prov_name=prov_name, recent=recent)
        attr = 'name'
        title_name = 'contracts'
    elif qtype == QuotesType.currency:
        instruments = Currency.load_all()
        dl_fn = partial(dl_cur, prov_name=prov_name)
        attr = 'code'
        title_name = 'currencies'
    elif qtype == QuotesType.others:
        instruments = Spot.load_all()
        dl_fn = partial(dl_spot, prov_name=prov_name)
        attr = 'name'
        title_name = 'spot prices'
    else:
        raise Exception('Unknown quotes type')

    title = 'Downloading %s history for %s' % (title_name, prov_name)
    if concurrent:
        title += ' (parallel)'
    bar = pyprind.ProgBar(len(instruments.values()), title=title)

    if concurrent:
        import concurrent.futures
        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
            download_futures = {executor.submit(lambda v: dl_fn(v), x): getattr(x, attr)
                                for x in instruments.values()}
            for future in concurrent.futures.as_completed(download_futures):
                bar.update(item_id=download_futures[future])
    else:
        for i in instruments.values():
            dl_fn(i)
Example #10
Source File: test_progress_bar.py From pyprind with BSD 3-Clause "New" or "Revised" License

def test_force_flush():
    bar = pyprind.ProgBar(n)
    for i in range(n):
        time.sleep(sleeptime)
        bar.update(force_flush=True)
Example #11
Source File: test_progress_bar.py From pyprind with BSD 3-Clause "New" or "Revised" License

def test_character():
    bar = pyprind.ProgBar(n, bar_char='>')
    for i in range(n):
        time.sleep(sleeptime)
        bar.update()
Example #12
Source File: test_progress_bar.py From pyprind with BSD 3-Clause "New" or "Revised" License

def test_item_tracking():
    items = ['file_%s.csv' % i for i in range(0, n)]
    bar = pyprind.ProgBar(len(items))
    for i in items:
        time.sleep(sleeptime)
        bar.update(item_id=i)
Example #13
Source File: test_progress_bar.py From pyprind with BSD 3-Clause "New" or "Revised" License

def test_width():
    bar = pyprind.ProgBar(n, width=10)
    for i in range(n):
        time.sleep(sleeptime)
        bar.update()
Example #14
Source File: test_progress_bar.py From pyprind with BSD 3-Clause "New" or "Revised" License

def test_monitoring():
    bar = pyprind.ProgBar(n, monitor=True)
    for i in range(n):
        time.sleep(sleeptime)
        bar.update()
    print(bar)
Example #15
Source File: test_progress_bar.py From pyprind with BSD 3-Clause "New" or "Revised" License

def test_bar():
    bar = pyprind.ProgBar(n)
    for i in range(n):
        time.sleep(sleeptime)
        bar.update()
Example #16
Source File: ex1_progress_bar_stdout.py From pyprind with BSD 3-Clause "New" or "Revised" License

def example_1():
    n = 1000000
    my_bar = pyprind.ProgBar(n, stream=1)
    for i in range(n):
        # do some computation
        my_bar.update()
Example #17
Source File: ex1_progress_bar_stderr.py From pyprind with BSD 3-Clause "New" or "Revised" License

def example_1():
    n = 1000000
    my_bar = pyprind.ProgBar(n, width=40, stream=2)
    for i in range(n):
        my_bar.update()
Example #18
Source File: ex2_progressbar_allargs.py From pyprind with BSD 3-Clause "New" or "Revised" License

def example_2():
    n = 1000000
    my_bar = pyprind.ProgBar(n, stream=1, width=30, track_time=True,
                             title='My Progress Bar', monitor=True)
    for i in range(n):
        # do some computation
        my_bar.update()
    print('\n\nPrint tracking object ...\n')
    print(my_bar)
Example #19
Source File: ex3_progress_bar_monitor.py From pyprind with BSD 3-Clause "New" or "Revised" License

def example_3():
    n = 1000000
    my_bar = pyprind.ProgBar(n, stream=1, title='example3', monitor=True)
    for i in range(n):
        # do some computation
        my_bar.update()
    print(my_bar)
Example #20
Source File: graph.py From arches with GNU Affero General Public License v3.0

def delete_instances(self, verbose=False):
    """
    deletes all associated resource instances

    """
    if verbose is True:
        bar = pyprind.ProgBar(Resource.objects.filter(graph_id=self.graphid).count())
    for resource in Resource.objects.filter(graph_id=self.graphid):
        resource.delete()
        if verbose is True:
            bar.update()
    if verbose is True:
        print(bar)
Example #21
Source File: backtest.py From bt with MIT License

def run(self):
    """
    Runs the Backtest.
    """
    if self.has_run:
        return

    # set run flag to avoid running same test more than once
    self.has_run = True

    # setup strategy
    self.strategy.setup(self.data)

    # adjust strategy with initial capital
    self.strategy.adjust(self.initial_capital)

    # loop through dates
    # init progress bar
    if self.progress_bar:
        bar = pyprind.ProgBar(len(self.dates), title=self.name, stream=1)

    # since there is a dummy row at time 0, start backtest at date 1.
    # we must still update for t0
    self.strategy.update(self.dates[0])

    # and for the backtest loop, start at date 1
    for dt in self.dates[1:]:
        # update progress bar
        if self.progress_bar:
            bar.update()

        # update strategy
        self.strategy.update(dt)

        if not self.strategy.bankrupt:
            self.strategy.run()
            # need update after to save weights, values and such
            self.strategy.update(dt)
        else:
            if self.progress_bar:
                bar.stop()

    self.stats = self.strategy.prices.calc_perf_stats()
    self._original_prices = self.strategy.prices
Example #22
Source File: filter.py From py_stringsimjoin with BSD 3-Clause "New" or "Revised" License

def _filter_candset_split(candset,
                          candset_l_key_attr, candset_r_key_attr,
                          ltable, rtable,
                          l_key_attr, r_key_attr,
                          l_filter_attr, r_filter_attr,
                          filter_object, show_progress):
    # Find column indices of key attr and filter attr in ltable
    l_columns = list(ltable.columns.values)
    l_key_attr_index = l_columns.index(l_key_attr)
    l_filter_attr_index = l_columns.index(l_filter_attr)

    # Find column indices of key attr and filter attr in rtable
    r_columns = list(rtable.columns.values)
    r_key_attr_index = r_columns.index(r_key_attr)
    r_filter_attr_index = r_columns.index(r_filter_attr)

    # Build a dictionary on ltable
    ltable_dict = build_dict_from_table(ltable, l_key_attr_index,
                                        l_filter_attr_index,
                                        remove_null=False)

    # Build a dictionary on rtable
    rtable_dict = build_dict_from_table(rtable, r_key_attr_index,
                                        r_filter_attr_index,
                                        remove_null=False)

    # Find indices of l_key_attr and r_key_attr in candset
    candset_columns = list(candset.columns.values)
    candset_l_key_attr_index = candset_columns.index(candset_l_key_attr)
    candset_r_key_attr_index = candset_columns.index(candset_r_key_attr)

    valid_rows = []

    if show_progress:
        prog_bar = pyprind.ProgBar(len(candset))

    for candset_row in candset.itertuples(index=False):
        l_id = candset_row[candset_l_key_attr_index]
        r_id = candset_row[candset_r_key_attr_index]

        l_row = ltable_dict[l_id]
        r_row = rtable_dict[r_id]

        valid_rows.append(not filter_object.filter_pair(
                                  l_row[l_filter_attr_index],
                                  r_row[r_filter_attr_index]))

        if show_progress:
            prog_bar.update()

    return candset[valid_rows]
Example #23
Source File: Crawler.py From IPTV with MIT License

def search_accounts(self, url=None):
    """Search Accounts

    This is the core method. It will crawl the give url for any possible accounts.
    If we found any we will create a new directory under /output with the name of
    the site plus every account as five .m3u. Please use VLC for opening that kind
    of files.

    Keyword arguments:
    url -- an url from the fetched list. (default None)

    Return:
    string -- the status of the crawling session
    """
    if not self.parsedUrls:
        return "You must fetch some URLs first"

    try:
        if not url:
            url = random.choice(self.parsedUrls)

        fileName = self.languageDir + "/" + self.language + ".txt"
        fileLength = self.file_length(fileName)
        progressBar = pyprind.ProgBar(fileLength,
                                      title="Fetching account from " + url + " this might take a while.",
                                      stream=1, monitor=True)
        foundedAccounts = 0

        with open(fileName) as f:
            rows = f.readlines()
            for row in rows:
                # Do the injection to the current url using the exploit that we know
                opener = urllib2.build_opener()
                opener.addheaders = [('User-agent', 'Mozilla/5.0')]
                response = opener.open(url + self.basicString % (row.rstrip().lstrip(),
                                                                 row.rstrip().lstrip()))
                fetched = response.read()

                # Update the progress bar in order to give to the user a nice
                # way to indicate the time left
                fileLength = fileLength - 1
                progressBar.update()

                # If the fetched content is not empty
                # we build the dedicated .m3u file
                if len(fetched) > 0:
                    newPath = self.outputDir + "/" + url.replace("http://", "")
                    self.create_file(row, newPath, fetched)

        # Remove the current used url in order to avoid to parse it again
        self.parsedUrls.remove(url)

        if self.foundedAccounts != 0:
            return "Search done, account founded on " + url + ": " + str(self.foundedAccounts)
        else:
            return "No results for " + url

    except IOError:
        return "Cannot open the current Language file. Try another one"
    except urllib2.HTTPError, e:
        return "Ops, HTTPError exception here. Cannot fetch the current URL " + str(e.code)