Python progressbar.Bar() Examples
The following are 30 code examples of progressbar.Bar(). Each example comes from an open-source project; the project and source file are listed above each example. You may also want to check out the other available functions and classes of the progressbar module.
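Before the examples, here is a minimal orientation sketch (not taken from any of the projects below) showing how progressbar.Bar() is typically combined with other widgets. It assumes the progressbar2 package, which accepts max_value; the legacy progressbar package uses maxval and an explicit start() call instead.

import time
import progressbar

widgets = [progressbar.Percentage(), ' ',
           progressbar.Bar(marker='=', left='[', right=']'), ' ',
           progressbar.ETA()]
bar = progressbar.ProgressBar(widgets=widgets, max_value=100)
for i in range(100):
    time.sleep(0.01)  # stand-in for real work
    bar.update(i + 1)
bar.finish()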
Example #1
Source File: progress.py From desmod with MIT License
def _get_progressbar_widgets(
    sim_index: Optional[int], timescale: TimeValue, know_stop_time: bool
) -> List[progressbar.widgets.WidgetBase]:
    widgets = []

    if sim_index is not None:
        widgets.append(f'Sim {sim_index:3}|')

    magnitude, units = timescale
    if magnitude == 1:
        sim_time_format = f'%(value)6.0f {units}|'
    else:
        sim_time_format = f'{magnitude}x%(value)6.0f {units}|'
    widgets.append(progressbar.FormatLabel(sim_time_format))

    widgets.append(progressbar.Percentage())

    if know_stop_time:
        widgets.append(progressbar.Bar())
    else:
        widgets.append(progressbar.BouncingBar())

    widgets.append(progressbar.ETA())

    return widgets
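The widget list returned above is meant to be handed to a progressbar.ProgressBar. A hedged usage sketch, with an illustrative timescale of (1, 's') and 100 simulation steps (not part of the desmod source):

widgets = _get_progressbar_widgets(sim_index=0, timescale=(1, 's'), know_stop_time=True)
pbar = progressbar.ProgressBar(widgets=widgets, max_value=100)
for step in range(100):
    pbar.update(step + 1)  # the current value is rendered by the FormatLabel/Bar widgets
pbar.finish()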
Example #2
Source File: geo_heatmap.py From geo-heatmap with MIT License
def loadKMLData(self, file_name, date_range):
    """Loads the Google location data from the given KML file.

    Arguments:
        file_name {string or file} -- The name of the KML file
            (or an open file-like object) with the Google location data.
        date_range {tuple} -- A tuple containing the min-date and max-date.
            e.g.: (None, None), (None, '2019-01-01'), ('2017-02-11'), ('2019-01-01')
    """
    xmldoc = minidom.parse(file_name)
    gxtrack = xmldoc.getElementsByTagName("gx:coord")
    when = xmldoc.getElementsByTagName("when")
    w = [Bar(), Percentage(), " ", ETA()]

    with ProgressBar(max_value=len(gxtrack), widgets=w) as pb:
        for i, number in enumerate(gxtrack):
            loc = (number.firstChild.data).split()
            coords = (round(float(loc[1]), 6), round(float(loc[0]), 6))
            date = when[i].firstChild.data
            if dateInRange(date[:10], date_range):
                self.updateCoord(coords)
            pb.update(i)
Example #3
Source File: Utils_Video.py From Tensorflow_Object_Tracking_Video with MIT License
def extract_frames(vid_path, video_perc):
    list = []
    frames = []
    # Opening & Reading the Video
    print("Opening File Video:%s " % vid_path)
    vidcap = cv2.VideoCapture(vid_path)
    if not vidcap.isOpened():
        print "could Not Open :", vid_path
        return
    print("Opened File Video:%s " % vid_path)
    print("Start Reading File Video:%s " % vid_path)
    image = vidcap.read()
    total = int((vidcap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT) / 100) * video_perc)
    print("%d Frames to Read" % total)
    progress = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(), ' ', progressbar.ETA()])
    for i in progress(range(0, total)):
        list.append("frame%d.jpg" % i)
        frames.append(image)
        image = vidcap.read()
    print("Finish Reading File Video:%s " % vid_path)
    return frames, list
Example #4
Source File: geo_heatmap.py From geo-heatmap with MIT License
def loadGPXData(self, file_name, date_range):
    """Loads location data from the given GPX file.

    Arguments:
        file_name {string or file} -- The name of the GPX file
            (or an open file-like object) with the GPX data.
        date_range {tuple} -- A tuple containing the min-date and max-date.
            e.g.: (None, None), (None, '2019-01-01'), ('2017-02-11'), ('2019-01-01')
    """
    xmldoc = minidom.parse(file_name)
    gxtrack = xmldoc.getElementsByTagName("trkpt")
    w = [Bar(), Percentage(), " ", ETA()]

    with ProgressBar(max_value=len(gxtrack), widgets=w) as pb:
        for i, trkpt in enumerate(gxtrack):
            lat = trkpt.getAttribute("lat")
            lon = trkpt.getAttribute("lon")
            coords = (round(float(lat), 6), round(float(lon), 6))
            date = trkpt.getElementsByTagName("time")[0].firstChild.data
            if dateInRange(date[:10], date_range):
                self.updateCoord(coords)
            pb.update(i)
Example #5
Source File: dataset.py From zhusuan with MIT License
def show_progress(block_num, block_size, total_size):
    global pbar
    if pbar is None:
        if total_size > 0:
            prefixes = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
            power = min(int(math.log(total_size, 2) / 10), len(prefixes) - 1)
            scaled = float(total_size) / (2 ** (10 * power))
            total_size_str = '{:.1f} {}B'.format(scaled, prefixes[power])
            try:
                marker = '█'
            except UnicodeEncodeError:
                marker = '*'
            widgets = [
                progressbar.Percentage(),
                ' ', progressbar.DataSize(),
                ' / ', total_size_str,
                ' ', progressbar.Bar(marker=marker),
                ' ', progressbar.ETA(),
                ' ', progressbar.AdaptiveTransferSpeed(),
            ]
            pbar = progressbar.ProgressBar(widgets=widgets,
                                           max_value=total_size)
        else:
            widgets = [
                progressbar.DataSize(),
                ' ', progressbar.Bar(marker=progressbar.RotatingMarker()),
                ' ', progressbar.Timer(),
                ' ', progressbar.AdaptiveTransferSpeed(),
            ]
            pbar = progressbar.ProgressBar(widgets=widgets,
                                           max_value=progressbar.UnknownLength)

    downloaded = block_num * block_size
    if downloaded < total_size:
        pbar.update(downloaded)
    else:
        pbar.finish()
        pbar = None
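A function with this (block_num, block_size, total_size) signature matches the reporthook protocol of urllib.request.urlretrieve, which is presumably how it is driven here. A hedged sketch with a placeholder URL and destination path:

from urllib import request

pbar = None  # module-level handle that show_progress reads and resets

request.urlretrieve('https://example.com/dataset.tar.gz',  # placeholder URL
                    '/tmp/dataset.tar.gz',                 # placeholder destination
                    reporthook=show_progress)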
Example #6
Source File: Utils_Video.py From Tensorflow_Object_Tracking_Video with MIT License
def make_tracked_video(out_vid_path, labeled_video_frames):
    if labeled_video_frames[0] is not None:
        img = cv2.imread(labeled_video_frames[0], True)
        print "Reading Filename: %s" % labeled_video_frames[0]
        h, w = img.shape[:2]
        print "Video Size: width: %d height: %d" % (h, w)
        fourcc = cv2.cv.CV_FOURCC('m', 'p', '4', 'v')
        out = cv2.VideoWriter(out_vid_path, fourcc, 20.0, (w, h), True)
        print("Start Making File Video:%s " % out_vid_path)
        print("%d Frames to Compress" % len(labeled_video_frames))
        progress = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(), ' ', progressbar.ETA()])
        for i in progress(range(0, len(labeled_video_frames))):
            if utils_image.check_image_with_pil(labeled_video_frames[i]):
                out.write(img)
                img = cv2.imread(labeled_video_frames[i], True)
        out.release()
        print("Finished Making File Video:%s " % out_vid_path)
Example #7
Source File: data_handler.py From cortex with BSD 3-Clause "New" or "Revised" License
def reset(self, mode, make_pbar=True, string=''):
    self.mode = mode
    self.u = 0

    if make_pbar:
        widgets = [string, Timer(), ' | ', Percentage(), ' | ', ETA(), Bar()]
        if len([len(loader[self.mode]) for loader in self.loaders.values()]) == 0:
            maxval = 1000
        else:
            maxval = min(len(loader[self.mode]) for loader in self.loaders.values())
        self.pbar = ProgressBar(widgets=widgets, maxval=maxval).start()
    else:
        self.pbar = None

    sources = self.loaders.keys()
    self.iterators = dict((source, self.make_iterator(source)) for source in sources)
Example #8
Source File: _windows.py From microk8s with Apache License 2.0
def _init_progress_bar(total_length, destination, message=None):
    if not message:
        message = "Downloading {!r}".format(os.path.basename(destination))

    valid_length = total_length and total_length > 0

    if valid_length and is_dumb_terminal():
        widgets = [message, " ", Percentage()]
        maxval = total_length
    elif valid_length and not is_dumb_terminal():
        widgets = [message, Bar(marker="=", left="[", right="]"), " ", Percentage()]
        maxval = total_length
    elif not valid_length and is_dumb_terminal():
        widgets = [message]
        maxval = UnknownLength
    else:
        widgets = [message, AnimatedMarker()]
        maxval = UnknownLength

    return ProgressBar(widgets=widgets, maxval=maxval)
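The helper above only constructs the bar; the caller still has to drive it. A plausible calling pattern, with illustrative byte counts (not taken from the microk8s source):

progress_bar = _init_progress_bar(total_length=1024 * 1024, destination='core.snap')
progress_bar.start()
for downloaded in (256 * 1024, 512 * 1024, 1024 * 1024):
    progress_bar.update(downloaded)  # report absolute bytes downloaded so far
progress_bar.finish()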
Example #9
Source File: timing.py From e2e-nlg-challenge-2017 with Apache License 2.0
def create_progress_bar(dynamic_msg=None):
    # Taken from Andreas Rueckle.
    # usage:
    #   bar = _create_progress_bar('loss')
    #   L = []
    #   for i in bar(iterable):
    #       ...
    #       L.append(...)
    #
    #   bar.dynamic_messages['loss'] = np.mean(L)
    widgets = [
        ' [batch ', progressbar.SimpleProgress(), '] ',
        progressbar.Bar(), ' (', progressbar.ETA(), ') '
    ]
    if dynamic_msg is not None:
        widgets.append(progressbar.DynamicMessage(dynamic_msg))
    return progressbar.ProgressBar(widgets=widgets)
Example #10
Source File: geo_heatmap.py From geo-heatmap with MIT License
def loadJSONData(self, json_file, date_range):
    """Loads the Google location data from the given json file.

    Arguments:
        json_file {file} -- An open file-like object with JSON-encoded
            Google location data.
        date_range {tuple} -- A tuple containing the min-date and max-date.
            e.g.: (None, None), (None, '2019-01-01'), ('2017-02-11'), ('2019-01-01')
    """
    data = json.load(json_file)
    w = [Bar(), Percentage(), " ", ETA()]

    with ProgressBar(max_value=len(data["locations"]), widgets=w) as pb:
        for i, loc in enumerate(data["locations"]):
            if "latitudeE7" not in loc or "longitudeE7" not in loc:
                continue
            coords = (round(loc["latitudeE7"] / 1e7, 6),
                      round(loc["longitudeE7"] / 1e7, 6))
            if timestampInRange(loc["timestampMs"], date_range):
                self.updateCoord(coords)
            pb.update(i)
Example #11
Source File: utils.py From HoneyBot with MIT License
def capture_on_interface(interface, name, timeout=60):
    """
    :param interface: The name of the interface on which to capture traffic
    :param name: The name of the capture file
    :param timeout: A limit in seconds specifying how long to capture traffic
    """
    if timeout < 15:
        logger.error("Timeout must be over 15 seconds.")
        return
    if not sys.warnoptions:
        warnings.simplefilter("ignore")
    start = time.time()
    widgets = [
        progressbar.Bar(marker=progressbar.RotatingMarker()),
        ' ',
        progressbar.FormatLabel('Packets Captured: %(value)d'),
        ' ',
        progressbar.Timer(),
    ]
    progress = progressbar.ProgressBar(widgets=widgets)
    capture = pyshark.LiveCapture(interface=interface, output_file=os.path.join('tmp', name))
    pcap_size = 0
    for i, packet in enumerate(capture.sniff_continuously()):
        progress.update(i)
        if os.path.getsize(os.path.join('tmp', name)) != pcap_size:
            pcap_size = os.path.getsize(os.path.join('tmp', name))
        if not isinstance(packet, pyshark.packet.packet.Packet):
            continue
        if time.time() - start > timeout:
            break
        if pcap_size > const.PT_MAX_BYTES:
            break
    capture.clear()
    capture.close()
    return pcap_size
Example #12
Source File: knn_missing_data.py From Generative-ConvACs with MIT License
def knn_masked_data(trX, trY, missing_data_dir, input_shape, k):
    raw_im_data = np.loadtxt(join(script_dir, missing_data_dir, 'index.txt'), delimiter=' ', dtype=str)
    raw_mask_data = np.loadtxt(join(script_dir, missing_data_dir, 'index_mask.txt'), delimiter=' ', dtype=str)
    # Using 'brute' method since we only want to do one query per classifier
    # so this will be quicker as it avoids overhead of creating a search tree
    knn_m = KNeighborsClassifier(algorithm='brute', n_neighbors=k)
    prob_Y_hat = np.zeros((raw_im_data.shape[0], int(np.max(trY) + 1)))
    total_images = raw_im_data.shape[0]
    pbar = progressbar.ProgressBar(
        widgets=[progressbar.FormatLabel('\rProcessed %(value)d of %(max)d Images '), progressbar.Bar()],
        maxval=total_images, term_width=50).start()
    for i in range(total_images):
        mask_im = load_image(join(script_dir, missing_data_dir, raw_mask_data[i][0]), input_shape, 1).reshape(np.prod(input_shape))
        mask = np.logical_not(mask_im > eps)  # since mask is 1 at missing locations
        v_im = load_image(join(script_dir, missing_data_dir, raw_im_data[i][0]), input_shape, 255).reshape(np.prod(input_shape))
        rep_mask = np.tile(mask, (trX.shape[0], 1))
        # Corrupt whole training set according to the current mask
        corr_trX = np.multiply(trX, rep_mask)
        knn_m.fit(corr_trX, trY)
        prob_Y_hat[i, :] = knn_m.predict_proba(v_im.reshape(1, -1))
        pbar.update(i)
    pbar.finish()
    return prob_Y_hat
Example #13
Source File: bar_logger.py From karonte with BSD 2-Clause "Simplified" License
def set_tot_elaborations(self, tot_elaborations):
    """
    Set the total number of elaborations

    :param tot_elaborations: total number of elaborations
    :return: None
    """
    widgets = [
        progressbar.Percentage(),
        ' (', progressbar.SimpleProgress(), ') ',
        progressbar.Bar(),
        progressbar.Timer(),
        ' ETC: ', self._ETC, ' '
    ]
    self._bar = progressbar.ProgressBar(redirect_stderr=True, max_value=tot_elaborations, widgets=widgets)
    self._bar.start()
    self._tot_elaborations = tot_elaborations
Example #14
Source File: eval_script.py From Tensorflow_Object_Tracking_Video with MIT License
def save_best_iou(val_bbox, output_bbox):
    progress = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(), ' ', progressbar.ETA()])
    count_best_bbox = 0
    len_val_bbox = len(val_bbox)
    len_output_bbox = len(output_bbox)
    count_missing_boxes = 0
    with open("best_iou.txt", 'a') as d:
        for i in progress(range(0, len(val_bbox))):
            for rect in val_bbox[i].rects:
                if (len(output_bbox[i].rects) > 0):
                    selected = multiclass_rectangle.pop_max_iou(output_bbox[i].rects, rect)
                    count_best_bbox = count_best_bbox + 1
                    d.write(str(val_bbox[i].frame) + ' ' + str(rect.label_chall) + ' 0.5 ' + str(selected.x1) + ' ' + str(selected.y1) + ' ' + str(selected.x2) + ' ' + str(selected.y2) + os.linesep)
                else:
                    count_missing_boxes = count_missing_boxes + 1
    print "Total Frame Number: " + str(len_val_bbox)
    print "Total Output Bounding Boxes: " + str(len_output_bbox)
    print "Total Best Bounding Boxes: " + str(count_best_bbox)
    print "Total Missing Bounding Boxes: " + str(count_missing_boxes)
    print "Total False Positive Bounding Boxes: " + str(len_output_bbox - count_best_bbox)
    print "BBox/Frame Number: " + str(float(count_best_bbox) / float(len_val_bbox))
    print "Missing BBox/Frame Number: " + str(float(float(count_missing_boxes) / float(len_val_bbox)))
    print "False Positive BBox/Frame Number: " + str(float(float(len_output_bbox - count_best_bbox) / float(len_val_bbox)))
Example #15
Source File: eval_script.py From Tensorflow_Object_Tracking_Video with MIT License
def save_best_overlap(val_bbox, output_bbox):
    progress = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(), ' ', progressbar.ETA()])
    count_best_bbox = 0
    len_val_bbox = len(val_bbox)
    len_output_bbox = len(output_bbox)
    count_missing_boxes = 0
    with open("best_overlap.txt", 'a') as d:
        for i in progress(range(0, len(val_bbox))):
            for rect in val_bbox[i].rects:
                if (len(output_bbox[i].rects) > 0):
                    selected = multiclass_rectangle.pop_max_overlap(output_bbox[i].rects, rect)
                    count_best_bbox = count_best_bbox + 1
                    d.write(str(val_bbox[i].frame) + ' ' + str(rect.label_chall) + ' 0.5 ' + str(selected.x1) + ' ' + str(selected.y1) + ' ' + str(selected.x2) + ' ' + str(selected.y2) + os.linesep)
                else:
                    count_missing_boxes = count_missing_boxes + 1
    print "Total Frame Number: " + str(len_val_bbox)
    print "Total Output Bounding Boxes: " + str(len_output_bbox)
    print "Total Best Bounding Boxes: " + str(count_best_bbox)
    print "Total Missing Bounding Boxes: " + str(count_missing_boxes)
    print "Total False Positive Bounding Boxes: " + str(len_output_bbox - count_best_bbox)
    print "BBox/Frame Number: " + str(float(count_best_bbox) / float(len_val_bbox))
    print "Missing BBox/Frame Number: " + str(float(float(count_missing_boxes) / float(len_val_bbox)))
    print "False Positive BBox/Frame Number: " + str(float(float(len_output_bbox - count_best_bbox) / float(len_val_bbox)))
Example #16
Source File: progress.py From desmod with MIT License
def _get_overall_pbar(
    num_simulations: int, max_width: int, fd: IO
) -> progressbar.ProgressBar:
    pbar = progressbar.ProgressBar(
        fd=fd,
        min_value=0,
        max_value=num_simulations,
        widgets=[
            progressbar.FormatLabel('%(value)s of %(max_value)s '),
            'simulations (',
            progressbar.Percentage(),
            ') ',
            progressbar.Bar(),
            progressbar.ETA(),
        ],
    )
    if max_width and pbar.term_width > max_width:
        pbar.term_width = max_width
    return pbar
Example #17
Source File: fastboot_debug.py From luci-py with Apache License 2.0
def KwargHandler(kwargs, argspec):
    if 'info_cb' in argspec.args:
        # Use an unbuffered version of stdout.
        def InfoCb(message):
            if not message.message:
                return
            sys.stdout.write('%s: %s\n' % (message.header, message.message))
            sys.stdout.flush()
        kwargs['info_cb'] = InfoCb

    if 'progress_callback' in argspec.args:
        bar = progressbar.ProgressBar(
            widgets=[progressbar.Bar(), progressbar.Percentage()])
        bar.start()

        def SetProgress(current, total):
            bar.update(current / total * 100.0)
            if current == total:
                bar.finish()

        kwargs['progress_callback'] = SetProgress
Example #18
Source File: download.py From chainer with MIT License
def download(url, dst_file_path):
    # Download a file, showing progress
    bar_wrap = [None]

    def reporthook(count, block_size, total_size):
        bar = bar_wrap[0]
        if bar is None:
            bar = progressbar.ProgressBar(
                maxval=total_size,
                widgets=[
                    progressbar.Percentage(),
                    ' ',
                    progressbar.Bar(),
                    ' ',
                    progressbar.FileTransferSpeed(),
                    ' | ',
                    progressbar.ETA(),
                ])
            bar.start()
            bar_wrap[0] = bar
        bar.update(min(count * block_size, total_size))

    request.urlretrieve(url, dst_file_path, reporthook=reporthook)
Example #19
Source File: search.py From thingscoop with MIT License
def label_video(filename, classifier, sample_rate=1, recreate_index=False):
    index_filename = generate_index_path(filename, classifier.model)
    if os.path.exists(index_filename) and not recreate_index:
        return read_index_from_path(index_filename)
    temp_frame_dir, frames = extract_frames(filename, sample_rate=sample_rate)
    timed_labels = []
    widgets = ["Labeling {}: ".format(filename), Percentage(), ' ', Bar(), ' ', ETA()]
    pbar = ProgressBar(widgets=widgets, maxval=len(frames)).start()
    for index, frame in enumerate(frames):
        pbar.update(index)
        labels = classifier.classify_image(frame)
        if not len(labels):
            continue
        t = (1. / sample_rate) * index
        timed_labels.append((t, labels))
    shutil.rmtree(temp_frame_dir)
    save_index_to_path(index_filename, timed_labels)
    return timed_labels
Example #20
Source File: models.py From thingscoop with MIT License
def download_model(model):
    if model_in_cache(model):
        return
    model_url = get_model_url(model)
    tmp_zip = tempfile.NamedTemporaryFile(suffix=".zip")
    prompt = "Downloading model {}".format(model)

    def cb(count, block_size, total_size):
        global progress_bar
        if not progress_bar:
            widgets = [prompt, Percentage(), ' ', Bar(), ' ', FileTransferSpeed(), ' ', ETA()]
            progress_bar = ProgressBar(widgets=widgets, maxval=int(total_size)).start()
        progress_bar.update(min(total_size, count * block_size))

    urllib.urlretrieve(model_url, tmp_zip.name, cb)
    z = zipfile.ZipFile(tmp_zip)
    out_path = get_model_local_path(model)
    try:
        os.mkdir(out_path)
    except:
        pass
    for name in z.namelist():
        if name.startswith("_"):
            continue
        z.extract(name, out_path)
Example #21
Source File: __init__.py From attention-lvcsr with MIT License
def create_bar(self):
    """Create a new progress bar.

    Calls `self.get_iter_per_epoch()`, selects an appropriate
    set of widgets and creates a ProgressBar.
    """
    iter_per_epoch = self.get_iter_per_epoch()
    epochs_done = self.main_loop.log.status['epochs_done']

    if iter_per_epoch is None:
        widgets = ["Epoch {}, step ".format(epochs_done),
                   progressbar.Counter(), ' ',
                   progressbar.BouncingBar(), ' ',
                   progressbar.Timer()]
        iter_per_epoch = progressbar.UnknownLength
    else:
        widgets = ["Epoch {}, step ".format(epochs_done),
                   progressbar.Counter(), ' (',
                   progressbar.Percentage(), ') ',
                   progressbar.Bar(), ' ',
                   progressbar.Timer(), ' ', progressbar.ETA()]
    return progressbar.ProgressBar(widgets=widgets,
                                   max_value=iter_per_epoch)
Example #22
Source File: download.py From AmusingPythonCodes with MIT License
def prepare_h5py(train_image, train_label, test_image, test_label, data_dir, shape=None):
    image = np.concatenate((train_image, test_image), axis=0).astype(np.uint8)
    label = np.concatenate((train_label, test_label), axis=0).astype(np.uint8)

    print('Preprocessing data...')

    bar = progressbar.ProgressBar(maxval=100,
                                  widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()

    f = h5py.File(os.path.join(data_dir, 'data.hy'), 'w')
    with open(os.path.join(data_dir, 'id.txt'), 'w') as data_id:
        for i in range(image.shape[0]):
            if i % (image.shape[0] / 100) == 0:
                bar.update(i / (image.shape[0] / 100))
            grp = f.create_group(str(i))
            data_id.write('{}\n'.format(i))
            if shape:
                grp['image'] = np.reshape(image[i], shape, order='F')
            else:
                grp['image'] = image[i]
            label_vec = np.zeros(10)
            label_vec[label[i] % 10] = 1
            grp['label'] = label_vec.astype(np.bool)
    bar.finish()
    f.close()
    return
Example #23
Source File: VID_yolo.py From Tensorflow_Object_Tracking_Video with MIT License
def print_YOLO_DET_result(det_results_list, folder_path_summary_result, file_path_summary_result):
    results_list = []
    if not os.path.exists(folder_path_summary_result):
        os.makedirs(folder_path_summary_result)
        print("Created Folder: %s" % folder_path_summary_result)
    print("Starting Loading Results ")
    progress = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(), ' ', progressbar.ETA()])
    names = ['class_name', 'x1', 'y1', 'x2', 'y2', 'score']
    df = pandas.DataFrame(columns=names)
    mean = 0.0
    with open(file_path_summary_result, "w") as out_file:
        for i in progress(range(0, len(det_results_list))):
            # df.append(pandas.read_csv(det_results_list[i], sep=',', names=names, encoding="utf8"))
            # results_list.append(pandas.read_csv(det_results_list[i], sep=',', names=names, encoding="utf8"))
            for line in open(det_results_list[i], "r"):
                df.loc[i] = tuple(line.strip().split(','))
                mean = mean + float(df.loc[i].score)
                out_file.write(str(tuple(line.strip().split(','))) + os.linesep)
    print("Finished Loading Results ")
    print("Computing Final Mean Reasults..")
    print "Class: " + df.class_name.max()
    print "Max Value: " + df.score.max()
    print "Min Value: " + df.score.min()
    print "Avg Value: " + str(mean / len(df))
    return

######### MAIN ###############
Example #24
Source File: utils.py From dqa-net with Apache License 2.0
def get_pbar(num, prefix=""): assert isinstance(prefix, str) pbar = pb.ProgressBar(widgets=[prefix, pb.Percentage(), pb.Bar(), pb.ETA()], maxval=num) return pbar
Example #25
Source File: convert_fc_to_tfrecords.py From GapFlyt with BSD 3-Clause "New" or "Revised" License
def convert_dataset(indices, name):
    # Open a TFRecordWriter
    filename = os.path.join(FLAGS.out, name + '.tfrecords')
    writeOpts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
    writer = tf.python_io.TFRecordWriter(filename, options=writeOpts)

    # Load each data sample (image_a, image_b, flow) and write it to the TFRecord
    count = 0
    pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(indices)).start()
    for i in indices:
        image_a_path = os.path.join(FLAGS.data_dir, '%05d_img1.ppm' % (i + 1))
        image_b_path = os.path.join(FLAGS.data_dir, '%05d_img2.ppm' % (i + 1))
        flow_path = os.path.join(FLAGS.data_dir, '%05d_flow.flo' % (i + 1))

        image_a = imread(image_a_path)
        image_b = imread(image_b_path)

        # Convert from RGB -> BGR
        image_a = image_a[..., [2, 1, 0]]
        image_b = image_b[..., [2, 1, 0]]

        # Scale from [0, 255] -> [0.0, 1.0]
        image_a = image_a / 255.0
        image_b = image_b / 255.0

        image_a_raw = image_a.tostring()
        image_b_raw = image_b.tostring()
        flow_raw = open_flo_file(flow_path).tostring()

        example = tf.train.Example(features=tf.train.Features(feature={
            'image_a': _bytes_feature(image_a_raw),
            'image_b': _bytes_feature(image_b_raw),
            'flow': _bytes_feature(flow_raw)}))
        writer.write(example.SerializeToString())
        pbar.update(count + 1)
        count += 1
    writer.close()
Example #26
Source File: es2csv.py From es2csv with Apache License 2.0
def write_to_csv(self):
    if self.num_results > 0:
        self.num_results = sum(1 for line in codecs.open(self.tmp_file, mode='r', encoding='utf-8'))
        if self.num_results > 0:
            output_file = codecs.open(self.opts.output_file, mode='a', encoding='utf-8')
            csv_writer = csv.DictWriter(output_file, fieldnames=self.csv_headers)
            csv_writer.writeheader()

            timer = 0
            widgets = ['Write to csv ',
                       progressbar.Bar(left='[', marker='#', right=']'),
                       progressbar.FormatLabel(' [%(value)i/%(max)i] ['),
                       progressbar.Percentage(),
                       progressbar.FormatLabel('] [%(elapsed)s] ['),
                       progressbar.ETA(), '] [',
                       progressbar.FileTransferSpeed(unit='lines'), ']'
                       ]
            bar = progressbar.ProgressBar(widgets=widgets, maxval=self.num_results).start()

            for line in codecs.open(self.tmp_file, mode='r', encoding='utf-8'):
                timer += 1
                bar.update(timer)
                csv_writer.writerow(json.loads(line))
            output_file.close()
            bar.finish()
        else:
            print('There is no docs with selected field(s): {}.'.format(','.join(self.opts.fields)))
        os.remove(self.tmp_file)
Example #27
Source File: download.py From Group-Normalization-Tensorflow with MIT License
def prepare_h5py(train_image, train_label, test_image, test_label, data_dir, num_class=10, shape=None):
    image = np.concatenate((train_image, test_image), axis=0).astype(np.uint8)
    label = np.concatenate((train_label, test_label), axis=0).astype(np.uint8)

    print('Preprocessing data...')

    import progressbar
    bar = progressbar.ProgressBar(
        maxval=100,
        widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()]
    )
    bar.start()

    f = h5py.File(os.path.join(data_dir, 'data.hdf5'), 'w')
    with open(os.path.join(data_dir, 'id.txt'), 'w') as data_id:
        for i in range(image.shape[0]):
            if i % (image.shape[0] / 100) == 0:
                bar.update(i / (image.shape[0] / 100))
            grp = f.create_group(str(i))
            data_id.write('{}\n'.format(i))
            if shape:
                grp['image'] = np.reshape(image[i], shape, order='F')
            else:
                grp['image'] = image[i]
            label_vec = np.zeros(num_class)
            label_vec[label[i] % num_class] = 1
            grp['label'] = label_vec.astype(np.bool)
    bar.finish()
    f.close()
    return
Example #28
Source File: download.py From SSGAN-Tensorflow with MIT License
def prepare_h5py(train_image, train_label, test_image, test_label, data_dir, shape=None):
    image = np.concatenate((train_image, test_image), axis=0).astype(np.uint8)
    label = np.concatenate((train_label, test_label), axis=0).astype(np.uint8)

    print('Preprocessing data...')

    import progressbar
    bar = progressbar.ProgressBar(maxval=100,
                                  widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()

    f = h5py.File(os.path.join(data_dir, 'data.hdf5'), 'w')
    data_id = open(os.path.join(data_dir, 'id.txt'), 'w')
    for i in range(image.shape[0]):
        if i % (image.shape[0] / 100) == 0:
            bar.update(i / (image.shape[0] / 100))
        grp = f.create_group(str(i))
        data_id.write(str(i) + '\n')
        if shape:
            grp['image'] = np.reshape(image[i], shape, order='F')
        else:
            grp['image'] = image[i]
        label_vec = np.zeros(10)
        label_vec[label[i] % 10] = 1
        grp['label'] = label_vec.astype(np.bool)
    bar.finish()
    f.close()
    data_id.close()
    return
Example #29
Source File: baidufuse.py From baidu-fuse with GNU General Public License v2.0
def __call__(self, *args, **kwargs):
    if self.first_call:
        self.widgets = [progressbar.Percentage(), ' ',
                        progressbar.Bar(marker=progressbar.RotatingMarker('>')),
                        ' ', progressbar.FileTransferSpeed()]
        self.pbar = progressbar.ProgressBar(widgets=self.widgets, maxval=kwargs['size']).start()
        self.first_call = False

    if kwargs['size'] <= kwargs['progress']:
        self.pbar.finish()
    else:
        self.pbar.update(kwargs['progress'])
Example #30
Source File: eval_script.py From Tensorflow_Object_Tracking_Video with MIT License
def val_to_data(source):
    text_lines = []
    frames_list = []
    frame = None
    progress = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(), ' ', progressbar.ETA()])
    with open(source, 'r') as s:
        for line in s:
            id_frame, id_class, conf, xmin, ymin, xmax, ymax = line.strip().split(' ')
            text_lines.append((id_frame, id_class, conf, xmin, ymin, xmax, ymax))
    for i in range(0, len(text_lines)):
        if frame is None:
            frame = fm.Frame_Info()
            frame.frame = text_lines[i][0]
            rect = multiclass_rectangle.Rectangle_Multiclass()
            # Not all the inserted values are really used
            rect.load_labeled_rect(0, text_lines[i][2], text_lines[i][2], text_lines[i][3], text_lines[i][4], text_lines[i][5], text_lines[i][6], text_lines[i][1], text_lines[i][1], text_lines[i][1])
            frame.append_labeled_rect(rect)
        else:
            if frame.frame == text_lines[i][0]:
                rect = multiclass_rectangle.Rectangle_Multiclass()
                # Not all the inserted values are really used
                rect.load_labeled_rect(0, text_lines[i][2], text_lines[i][2], text_lines[i][3], text_lines[i][4], text_lines[i][5], text_lines[i][6], text_lines[i][1], text_lines[i][1], text_lines[i][1])
                frame.append_labeled_rect(rect)
            else:
                frames_list.append(frame)
                frame = fm.Frame_Info()
                frame.frame = text_lines[i][0]
                rect = multiclass_rectangle.Rectangle_Multiclass()
                # Not all the inserted values are really used
                rect.load_labeled_rect(0, text_lines[i][2], text_lines[i][2], text_lines[i][3], text_lines[i][4], text_lines[i][5], text_lines[i][6], text_lines[i][1], text_lines[i][1], text_lines[i][1])
                frame.append_labeled_rect(rect)
    frames_list.append(frame)
    return frames_list