Python resource.RUSAGE_SELF Examples
The following are 18 code examples of resource.RUSAGE_SELF. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module resource, or try the search function.
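RUSAGE_SELF is a constant, not a callable: it selects the calling process when passed to resource.getrusage(). The resource module is Unix-only. A minimal sketch of typical use:

```python
import resource

usage = resource.getrusage(resource.RUSAGE_SELF)
print("user CPU:", usage.ru_utime, "s")
print("sys CPU: ", usage.ru_stime, "s")
print("max RSS: ", usage.ru_maxrss)  # kilobytes on Linux, bytes on macOS
```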
Example #1
Source File: __init__.py From QUANTAXIS with MIT License | 7 votes |
```python
# Imports implied by the snippet:
from functools import wraps
from resource import getrusage as resource_usage, RUSAGE_SELF
from time import time as timestamp


def print_used_time(func):
    """Print the run time.

    :param func: the function to run
    :return:
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time, start_resources = timestamp(), resource_usage(RUSAGE_SELF)
        func(*args, **kwargs)
        end_resources, end_time = resource_usage(RUSAGE_SELF), timestamp()
        print({'time used': {
            'real': end_time - start_time,
            'sys': end_resources.ru_stime - start_resources.ru_stime,
            'user': end_resources.ru_utime - start_resources.ru_utime}})
        return True
    return wrapper
```
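A hypothetical usage sketch (`slow_sum` is invented for illustration); note that the wrapper returns True rather than the wrapped function's result:

```python
@print_used_time
def slow_sum(n):
    return sum(range(n))

slow_sum(10_000_000)
# prints something like:
# {'time used': {'real': 0.31..., 'sys': 0.0..., 'user': 0.30...}}
```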
Example #2
Source File: mem_test_thread.py From Auto-PyTorch with Apache License 2.0 | 6 votes |
```python
# Imports implied by the snippet (display_top is a helper defined
# elsewhere in the same file):
import tracemalloc
from datetime import datetime
from queue import Empty, Queue
from resource import getrusage, RUSAGE_SELF


def memory_monitor(command_queue: Queue, poll_interval=1):
    tracemalloc.start()
    old_max = 0
    snapshot = None
    while True:
        try:
            command_queue.get(timeout=poll_interval)
            if snapshot is not None:
                print(datetime.now())
                display_top(snapshot)
            return
        except Empty:
            max_rss = getrusage(RUSAGE_SELF).ru_maxrss
            if max_rss > old_max:
                old_max = max_rss
                snapshot = tracemalloc.take_snapshot()
                display_top(snapshot, limit=1)
                print(datetime.now(), 'max RSS', old_max)
```
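A hypothetical driver for the monitor: it runs in a background thread and exits when any item is put on the command queue (`run_workload` is invented for illustration):

```python
from queue import Queue
from threading import Thread

queue = Queue()
monitor = Thread(target=memory_monitor, args=(queue,))
monitor.start()
try:
    run_workload()  # hypothetical: the code whose memory is being watched
finally:
    queue.put('stop')  # any item ends the monitor loop
    monitor.join()
```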
Example #3
Source File: util.py From opsbro with MIT License | 6 votes |
```python
import os
import time

# `daemon_start` is a module-level timestamp set when the daemon starts
# (implied by the snippet).
daemon_start = time.time()


def get_cpu_consumption():
    global daemon_start
    if os.name == 'nt':
        return 0
    # Some special unix maybe?
    try:
        from resource import getrusage, RUSAGE_SELF
    except ImportError:
        return 0
    now = time.time()
    # Maybe we did get back in time?
    if now < daemon_start:
        daemon_start = now
    diff = now - daemon_start
    if diff == 0:
        return 0
    rusage = getrusage(RUSAGE_SELF)
    current_cpu_time = rusage.ru_utime + rusage.ru_stime
    return 100 * current_cpu_time / diff
```
Example #4
Source File: process.py From pyFileFixity with MIT License | 6 votes |
```python
def update(self):
    """Get memory metrics of current process through `getrusage`.

    Only available on Unix; on Linux most of the fields are not set,
    and on BSD units are used that are not very helpful, see:
    http://www.perlmonks.org/?node_id=626693

    Furthermore, getrusage only provides accumulated statistics
    (e.g. max rss vs current rss).
    """
    usage = getrusage(RUSAGE_SELF)
    self.rss = usage.ru_maxrss * 1024
    self.data_segment = usage.ru_idrss * 1024    # TODO: ticks?
    self.shared_segment = usage.ru_ixrss * 1024  # TODO: ticks?
    self.stack_segment = usage.ru_isrss * 1024   # TODO: ticks?
    self.vsz = self.data_segment + self.shared_segment + \
        self.stack_segment
    self.pagefaults = usage.ru_majflt
    return self.rss != 0
```
Example #5
Source File: segmenter.py From waldo with Apache License 2.0 | 5 votes |
```python
def init_objects_and_adjacency_records(self):
    print("Initializing the segmenter...")
    print("Max mem: {} GB".format(resource.getrusage(
        resource.RUSAGE_SELF).ru_maxrss / 1024 / 1024))
    obj_id = 0
    for row in range(self.img_height):
        for col in range(self.img_width):
            pixels = set([(row, col)])
            obj = Object(pixels, obj_id, self)
            self.objects[obj_id] = obj
            self.pixel2obj[(row, col)] = obj
            obj_id += 1
    for row in range(self.img_height):
        for col in range(self.img_width):
            obj1 = self.pixel2obj[(row, col)]
            for idx, (i, j) in enumerate(self.offsets):
                if (0 <= row + i < self.img_height and
                        0 <= col + j < self.img_width):
                    obj2 = self.pixel2obj[(row + i, col + j)]
                    arec = AdjacencyRecord(obj1, obj2, self, (row, col), idx)
                    self.adjacency_records[arec] = arec
                    obj1.adjacency_list[arec] = arec
                    obj2.adjacency_list[arec] = arec
                    if arec.merge_priority >= 0:
                        heappush(self.queue, (-arec.merge_priority, arec))
```
Example #6
Source File: profile.py From dizzy with BSD 3-Clause "New" or "Revised" License | 5 votes |
```python
# Imports implied by the snippet; `p_stats` and `p_start_time` are
# module-level globals (see the setup sketch below this example).
import threading
from collections import deque
from resource import getrusage, RUSAGE_SELF
from time import time


def profiler(frame, event, arg):
    if event not in ('call', 'return'):
        return profiler

    #### gather stats ####
    rusage = getrusage(RUSAGE_SELF)
    t_cpu = rusage[0] + rusage[1]  # user time + system time
    code = frame.f_code
    fun = (code.co_name, code.co_filename, code.co_firstlineno)

    #### get stack with functions entry stats ####
    ct = threading.currentThread()
    try:
        p_stack = ct.p_stack
    except AttributeError:
        ct.p_stack = deque()
        p_stack = ct.p_stack

    #### handle call and return ####
    if event == 'call':
        p_stack.append((time(), t_cpu, fun))
    elif event == 'return':
        try:
            t, t_cpu_prev, f = p_stack.pop()
            assert f == fun
        except IndexError:  # TODO investigate
            t, t_cpu_prev, f = p_start_time, 0.0, None
        call_cnt, t_sum, t_cpu_sum = p_stats.get(fun, (0, 0.0, 0.0))
        p_stats[fun] = (call_cnt + 1, t_sum + time() - t,
                        t_cpu_sum + t_cpu - t_cpu_prev)

    return profiler
```
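The hook is written for sys.setprofile/threading.setprofile; the module-level `p_stats` and `p_start_time` it reads must exist before installation. A minimal setup sketch, assuming those are the only required globals:

```python
import sys
import threading
from time import time

p_stats = {}           # fun -> (call_count, wall_time_sum, cpu_time_sum)
p_start_time = time()  # fallback start time for unmatched 'return' events

threading.setprofile(profiler)  # hook threads started after this point
sys.setprofile(profiler)        # hook the current (main) thread
```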
Example #7
Source File: fork_histogram.py From mitogen with BSD 3-Clause "New" or "Revised" License | 5 votes |
```python
def playbook_on_stats(self, stats):
    if hdrh is None or 'FORK_HISTOGRAM' not in os.environ:
        return

    self_faults = get_fault_count(resource.RUSAGE_SELF) - self.faults_at_start
    child_faults = get_fault_count()
    run_duration_sec = time.time() - self.run_start_time
    fault_wastage_usec = (
        (self.self_fault_usec * self_faults) +
        (self.child_fault_usec * child_faults)
    )
    fork_wastage = self.hist.get_total_count()
    all_wastage_usec = (2 * self.fork_latency_sum_usec) + fault_wastage_usec

    print('--- Fork statistics ---')
    print('Post-boot run duration: %.02f ms, %d total forks' % (
        1000 * run_duration_sec,
        self.hist.get_total_count(),
    ))
    print('Self faults during boot: %d, post-boot: %d, avg %d/child' % (
        self.faults_at_start,
        self_faults,
        self_faults / self.hist.get_total_count(),
    ))
    print('Total child faults: %d, avg %d/child' % (
        child_faults,
        child_faults / self.hist.get_total_count(),
    ))
    print('Est. wastage on faults: %d ms, forks+faults+waits: %d ms (%.2f%%)' % (
        fault_wastage_usec / 1000,
        all_wastage_usec / 1000,
        100 * (all_wastage_usec / (run_duration_sec * 1e6)),
    ))
    print('99th%% fork latency: %.03f msec, max %d new tasks/sec' % (
        self.hist.get_value_at_percentile(99) / 1000.0,
        1e6 / self.hist.get_value_at_percentile(99),
    ))
    self.hist.output_percentile_distribution(sys.stdout, 1000)
    print('--- End fork statistics ---')
    print()
```
Example #8
Source File: fork_histogram.py From mitogen with BSD 3-Clause "New" or "Revised" License | 5 votes |
```python
def install(self):
    self.faults_at_start = get_fault_count(resource.RUSAGE_SELF)
    self.run_start_time = time.time()
    self.real_fork = os.fork
    os.fork = self.my_fork
```
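`get_fault_count`, used in both fork_histogram.py examples, is a helper defined elsewhere in that file. A plausible sketch, assuming it totals minor plus major page faults for the given rusage target (defaulting to child processes when called with no argument):

```python
import resource

def get_fault_count(target=resource.RUSAGE_CHILDREN):
    # Total page faults (soft + hard) accumulated for the target
    ru = resource.getrusage(target)
    return ru.ru_minflt + ru.ru_majflt
```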
Example #9
Source File: base.py From trappy with Apache License 2.0 | 5 votes |
```python
def generate_parsed_data(self):
    # Get a rough idea of how much memory we have to play with
    CHECK_MEM_COUNT = 10000
    kb_free = _get_free_memory_kb()
    starting_maxrss = getrusage(RUSAGE_SELF).ru_maxrss
    check_memory_usage = True
    check_memory_count = 1

    for (comm, pid, cpu, line, data_str) in zip(
            self.comm_array, self.pid_array, self.cpu_array,
            self.line_array, self.data_array):
        data_dict = {"__comm": comm, "__pid": pid, "__cpu": cpu,
                     "__line": line}
        data_dict.update(self.generate_data_dict(data_str))

        # When running out of memory, Pandas has been observed to segfault
        # rather than throwing a proper Python error.
        # Look at how much memory our process is using and warn if we seem
        # to be getting close to the system's limit, check it only once
        # in the beginning and then every CHECK_MEM_COUNT events
        check_memory_count -= 1
        if check_memory_usage and check_memory_count == 0:
            kb_used = (getrusage(RUSAGE_SELF).ru_maxrss - starting_maxrss)
            if kb_free and kb_used > kb_free * 0.9:
                warnings.warn("TRAPpy: Appear to be low on memory. "
                              "If errors arise, try providing more RAM")
                check_memory_usage = False
            check_memory_count = CHECK_MEM_COUNT

        yield data_dict
```
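`_get_free_memory_kb` is a TRAPpy helper not shown on this page. A minimal Linux-only sketch consistent with how the snippet uses it (kilobytes, or None when the information is unavailable) might read MemFree from /proc/meminfo:

```python
def _get_free_memory_kb():
    try:
        with open("/proc/meminfo") as f:
            for line in f:
                if line.startswith("MemFree:"):
                    # Line looks like "MemFree:  12345678 kB"
                    return int(line.split()[1])
    except IOError:
        pass
    return None
```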
Example #10
Source File: train.py From incremental-sequence-learning with MIT License | 5 votes |
```python
import resource  # implied by the snippet


def memusage(point=""):
    usage = resource.getrusage(resource.RUSAGE_SELF)
    return '''%s: usertime = %s systime = %s mem = %s mb
''' % (point, usage[0], usage[1],
       (usage[2] * resource.getpagesize()) / 1000000.0)
```
Example #11
Source File: instrumentation.py From Kenshin with Apache License 2.0 | 5 votes |
```python
# Imports implied by the snippet:
import time
from resource import getrusage, RUSAGE_SELF


def _get_usage_info():
    rusage = getrusage(RUSAGE_SELF)
    curr_usage = rusage.ru_utime + rusage.ru_stime
    curr_time = time.time()
    return curr_usage, curr_time
```
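The returned (usage, time) pair is meant to be sampled twice and diffed to get a CPU percentage over the interval. A sketch of that arithmetic (not Kenshin's reporting code; `do_some_work` is invented for illustration):

```python
usage1, time1 = _get_usage_info()
do_some_work()
usage2, time2 = _get_usage_info()

elapsed = time2 - time1
if elapsed > 0:
    print("CPU: %.1f%%" % (100.0 * (usage2 - usage1) / elapsed))
```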
Example #12
Source File: reshaper.py From PyReshaper with Apache License 2.0 | 5 votes |
```python
# Imports implied by the snippet:
from resource import getrusage, RUSAGE_SELF
from sys import platform


def _get_memory_usage_MB_():
    """Return the maximum memory use of this Python process in MB"""
    to_MB = 1024.
    if platform == 'darwin':
        to_MB *= to_MB
    return getrusage(RUSAGE_SELF).ru_maxrss / to_MB
```
Example #13
Source File: check_imaging_leaks.py From python3_ios with BSD 3-Clause "New" or "Revised" License | 5 votes |
```python
def _get_mem_usage(self):
    from resource import getpagesize, getrusage, RUSAGE_SELF
    mem = getrusage(RUSAGE_SELF).ru_maxrss
    return mem * getpagesize() / 1024 / 1024
```
Example #14
Source File: helper.py From python3_ios with BSD 3-Clause "New" or "Revised" License | 5 votes |
```python
def _get_mem_usage(self):
    """
    Gets the RUSAGE memory usage, returns in K. Encapsulates the
    difference between macOS and Linux rss reporting.

    :returns: memory usage in kilobytes
    """
    from resource import getrusage, RUSAGE_SELF
    mem = getrusage(RUSAGE_SELF).ru_maxrss
    if sys.platform == 'darwin':
        # man 2 getrusage:
        #     ru_maxrss
        #     This is the maximum resident set size utilized (in bytes).
        return mem / 1024  # Kb
    else:
        # linux
        # man 2 getrusage:
        #     ru_maxrss (since Linux 2.6.32)
        #     This is the maximum resident set size used (in kilobytes).
        return mem  # Kb
```
Example #15
Source File: memusage.py From autopush with Mozilla Public License 2.0 | 5 votes |
```python
# Relies on gc, resource, objgraph, a StringIO class and several
# autopush/PyPy-specific helpers (pmap_extended, jemalloc_stats,
# glibc_malloc_info, dump_rpy_heap, get_stats_asmmemmgr) imported
# elsewhere in the module.
def memusage(do_dump_rpy_heap=True, do_objgraph=True):
    # type: (Optional[bool], Optional[bool]) -> str
    """Returning a str of memory usage stats"""
    def trap_err(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:  # pragma: nocover
            # include both __str/repr__, sometimes one's useless
            buf.writelines([func.__name__, ': ', repr(e), ': ', str(e)])

    buf = StringIO()
    rusage = trap_err(resource.getrusage, resource.RUSAGE_SELF)
    buf.writelines([repr(rusage), '\n\n'])
    trap_err(pmap_extended, buf)
    trap_err(jemalloc_stats, buf)
    trap_err(glibc_malloc_info, buf)
    if hasattr(gc, 'get_stats'):
        buf.writelines(['\n\n', gc.get_stats(), '\n\n'])
    if do_dump_rpy_heap:
        # dump rpython's heap before objgraph potentially pollutes the
        # heap with its heavy workload
        trap_err(dump_rpy_heap, buf)
        trap_err(get_stats_asmmemmgr, buf)
        buf.write('\n\n')
    if do_objgraph:
        trap_err(objgraph.show_most_common_types, limit=0, file=buf)
    return buf.getvalue()
```
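For illustration only, a stand-in for one of those helpers; this is not autopush's implementation, just a sketch of what a pmap_extended-like function could do by shelling out to pmap(1) on Linux:

```python
import os
import subprocess

def pmap_extended(buf):
    # Append this process's extended memory map to the buffer
    out = subprocess.check_output(['pmap', '-x', str(os.getpid())])
    buf.write(out.decode('utf-8', 'replace'))
```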
Example #16
Source File: usage.py From deimos with Apache License 2.0 | 5 votes |
```python
import resource  # implied by the snippet


def rusage(target=resource.RUSAGE_SELF):
    r = resource.getrusage(target)
    fmt = "rss = %0.03fM user = %0.03f sys = %0.03f"
    return fmt % (r.ru_maxrss / 1024.0, r.ru_utime, r.ru_stime)
```
Example #17
Source File: usage.py From deimos with Apache License 2.0 | 5 votes |
```python
def self(level=logging.DEBUG):
    log.log(level, rusage(resource.RUSAGE_SELF))
```
Example #18
Source File: segmenter.py From waldo with Apache License 2.0 | 4 votes |
```python
def run_segmentation(self):
    """
    This is the top-level function that performs the optimization.
    This is the overview:
    - While the queue is non-empty:
        - Pop (merge_priority, arec) from the queue.
        - If merge_priority != arec.merge_priority, continue
          # don't worry, the queue will have the right
          # merge_priority for this arec somewhere else in it.
        - Recompute arec.merge_priority, which involves recomputing
          class_delta_log_prob. This is needed because as we merge
          objects, the value of class_delta_log_prob and/or the number
          of pixels may have changed and the adjacency record may not
          have been updated.
        - If the newly computed arec.merge_priority is >= the old value
          (i.e. this merge is at least as good a merge as we thought it
          was when we got it from the queue), go ahead and merge the
          objects.
        - Otherwise if arec.merge_priority >= 0 then re-insert "arec"
          into the queue with its newly computed merge priority.
    """
    print("Starting segmentation...")
    n = 0
    self.verbose = 0
    self.do_debugging = False
    while self.queue:
        if n % 500000 == 0:
            print("At iteration {}: max mem: {:0.2f} GB".format(
                n, resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
                / 1024 / 1024))
            self.show_stats()
            if self.do_debugging:
                print("Logprob from scratch: {}".format(
                    self.compute_total_logprob_from_scratch()))
            print("")
        n += 1
        merge_cost, arec = heappop(self.queue)
        merge_priority = -merge_cost
        if merge_priority != arec.merge_priority:
            continue
        arec.update_merge_priority(self)
        if arec.merge_priority >= merge_priority:
            self.merge(arec)
        elif arec.merge_priority >= 0:
            heappush(self.queue, (-arec.merge_priority, arec))
    if len(self.queue) == 0:
        print("Finished. Queue is empty.")
        self.show_stats()
        self.visualize('final')
        if self.verbose >= 1:
            print("Final logprob from scratch: {}".format(
                self.compute_total_logprob_from_scratch()))
    return self.output_mask()
```
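The queue handling above is the classic "lazy deletion" workaround for heapq's lack of a decrease-key operation: stale entries stay in the heap and are skipped on pop because their recorded priority no longer matches the record's current merge_priority. A self-contained sketch of the same pattern:

```python
import heapq

heap = []
current_priority = {}  # item -> its live priority

def push(item, priority):
    current_priority[item] = priority
    heapq.heappush(heap, (-priority, item))  # max-priority via negation

def pop_live():
    while heap:
        neg, item = heapq.heappop(heap)
        if -neg == current_priority.get(item):
            return item  # entry matches the live priority
        # otherwise the entry is stale: a newer one exists in the heap
    return None

push('a', 1.0)
push('b', 2.0)
push('a', 3.0)           # 'a' re-prioritized; its old entry becomes stale
assert pop_live() == 'a'
assert pop_live() == 'b'
```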