Python memory_profiler.memory_usage() Examples
The following are 25 code examples of memory_profiler.memory_usage(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module memory_profiler, or try the search function.
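Before the examples, here is a minimal sketch of the two call forms that recur below (assuming memory_profiler is installed; work is a stand-in function; all readings are in MiB):

from memory_profiler import memory_usage

# 1. Sample the current process (-1, the default) every 0.1 s for about
#    a second; returns a list of readings in MiB.
samples = memory_usage(-1, interval=0.1, timeout=1)
print(max(samples))

# 2. Run a callable and sample while it executes; the target is a
#    (func, args, kwargs) tuple.
def work(n):
    return sum(range(n))

peak = max(memory_usage((work, (10_000_000,), {})))
print(peak)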
Example #1
Source File: ipython_memory_usage.py From ipython_memory_usage with BSD 2-Clause "Simplified" License | 7 votes |
def during_execution_memory_sampler():
    """Thread to sample memory usage"""
    import time
    import memory_profiler
    global keep_watching, peak_memory_usage
    peak_memory_usage = -1
    keep_watching = True

    n = 0
    WAIT_BETWEEN_SAMPLES_SECS = 0.001
    MAX_ITERATIONS = 60.0 / WAIT_BETWEEN_SAMPLES_SECS
    while True:
        mem_usage = memory_profiler.memory_usage()[0]
        peak_memory_usage = max(mem_usage, peak_memory_usage)
        time.sleep(WAIT_BETWEEN_SAMPLES_SECS)
        if not keep_watching or n > MAX_ITERATIONS:
            # exit if we've been told our command has finished or if it has run
            # for more than a sane amount of time (e.g. maybe something crashed
            # and we don't want this to carry on running)
            if n > MAX_ITERATIONS:
                print("{} SOMETHING WEIRD HAPPENED AND THIS RAN FOR TOO LONG, THIS THREAD IS KILLING ITSELF".format(__file__))
            break
        n += 1
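This function is written as a thread target: it polls memory_profiler.memory_usage() until the keep_watching flag is cleared. A sketch of how such a sampler might be launched around a command (the monitored work is a stand-in, not part of the project above):

import threading

sampler = threading.Thread(target=during_execution_memory_sampler)
sampler.daemon = True
sampler.start()
# ... run the command being measured ...
keep_watching = False   # tell the sampler loop to stop
sampler.join()
print(peak_memory_usage)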
Example #2
Source File: 04_dimension_reduction_and_performance.py From dirty_cat with BSD 3-Clause "New" or "Revised" License | 7 votes |
def resource_used(func):
    """ Decorator that returns a function that prints its usage """
    @functools.wraps(func)
    def wrapped_func(*args, **kwargs):
        t0 = time()
        mem, out = memory_profiler.memory_usage((func, args, kwargs),
                                                max_usage=True, retval=True)
        print("Run time: %.1is    Memory used: %iMb"
              % (time() - t0, mem))
        return out
    return wrapped_func


################################################################################
# Data Importing and preprocessing
# --------------------------------
#
# We first download the dataset:
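Applying the decorator is straightforward; a usage sketch, where load_data is a hypothetical function (the module is assumed to have done from time import time, as the t0 = time() call implies):

@resource_used
def load_data():
    return [0] * 10_000_000   # allocate something measurable

data = load_data()   # prints something like: Run time: 0s    Memory used: 76Mb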
Example #3
Source File: notebook_memory_management.py From h2o4gpu with Apache License 2.0 | 6 votes |
def watch_memory():
    # bring in the global memory usage value from the previous iteration
    global previous_call_memory_usage, peak_memory_usage, keep_watching, \
        watching_memory, input_cells
    new_memory_usage = memory_profiler.memory_usage()[0]
    memory_delta = new_memory_usage - previous_call_memory_usage
    keep_watching = False
    # calculate time delta using global t1 (from the pre-run event) and current
    # time
    time_delta_secs = time.time() - t1
    num_commands = len(input_cells) - 1
    cmd = "In [{}]".format(num_commands)
    # convert the results into a pretty string
    output_template = ("{cmd} used {memory_delta:0.4f} MiB RAM in "
                       "{time_delta:0.2f}s, total RAM usage "
                       "{memory_usage:0.2f} MiB")
    output = output_template.format(time_delta=time_delta_secs, cmd=cmd,
                                    memory_delta=memory_delta,
                                    memory_usage=new_memory_usage)
    if watching_memory:
        print(str(output))
    previous_call_memory_usage = new_memory_usage
Example #4
Source File: ipython_memory_usage_perf.py From ipython_memory_usage with BSD 2-Clause "Simplified" License | 6 votes |
def during_execution_memory_sampler():
    import time
    import memory_profiler
    global keep_watching, peak_memory_usage
    peak_memory_usage = -1
    keep_watching = True

    n = 0
    WAIT_BETWEEN_SAMPLES_SECS = 0.001
    MAX_ITERATIONS = 60.0 / WAIT_BETWEEN_SAMPLES_SECS
    while True:
        mem_usage = memory_profiler.memory_usage()[0]
        peak_memory_usage = max(mem_usage, peak_memory_usage)
        time.sleep(WAIT_BETWEEN_SAMPLES_SECS)
        if not keep_watching or n > MAX_ITERATIONS:
            # exit if we've been told our command has finished or if it has run
            # for more than a sane amount of time (e.g. maybe something crashed
            # and we don't want this to carry on running)
            if n > MAX_ITERATIONS:
                print("{} SOMETHING WEIRD HAPPENED AND THIS RAN FOR TOO LONG, THIS THREAD IS KILLING ITSELF".format(__file__))
            break
        n += 1
Example #5
Source File: gen_rst.py From sphinx-gallery with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _get_memory_base(gallery_conf):
    """Get the base amount of memory used by running a Python process."""
    if not gallery_conf['plot_gallery']:
        return 0.
    # There might be a cleaner way to do this at some point
    from memory_profiler import memory_usage
    if sys.platform in ('win32', 'darwin'):
        sleep, timeout = (1, 2)
    else:
        sleep, timeout = (0.5, 1)
    proc = subprocess.Popen(
        [sys.executable, '-c',
         'import time, sys; time.sleep(%s); sys.exit(0)' % sleep],
        close_fds=True)
    memories = memory_usage(proc, interval=1e-3, timeout=timeout)
    kwargs = dict(timeout=timeout) if sys.version_info >= (3, 5) else {}
    proc.communicate(**kwargs)
    # On OSX sometimes the last entry can be None
    memories = [mem for mem in memories if mem is not None] + [0.]
    memory_base = max(memories)
    return memory_base
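The baseline presumably lets later measurements report memory attributable to an example rather than to the interpreter itself; a sketch of that idea, where run_example and gallery_conf are stand-ins rather than Sphinx-Gallery API:

base = _get_memory_base(gallery_conf)
samples = memory_usage((run_example, (), {}), interval=1e-3)
net_peak = max(samples) - base   # memory attributable to the example itself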
Example #6
Source File: memwatcher.py From ipython_memwatcher with BSD 2-Clause "Simplified" License | 6 votes |
def during_execution_memory_sampler(self):
    import time
    import memory_profiler
    self.peak_memory_usage = -1
    self.keep_watching = True

    n = 0
    WAIT_BETWEEN_SAMPLES_SECS = 0.001
    MAX_ITERATIONS = 60.0 / WAIT_BETWEEN_SAMPLES_SECS
    while True:
        mem_usage = memory_profiler.memory_usage()[0]
        self.peak_memory_usage = max(mem_usage, self.peak_memory_usage)
        time.sleep(WAIT_BETWEEN_SAMPLES_SECS)
        if not self.keep_watching or n > MAX_ITERATIONS:
            # exit if we've been told our command has finished or
            # if it has run for more than a sane amount of time
            # (e.g. maybe something crashed and we don't want this
            # to carry on running)
            if n > MAX_ITERATIONS:
                print("{} SOMETHING WEIRD HAPPENED AND THIS RAN FOR TOO LONG, THIS THREAD IS KILLING ITSELF".format(__file__))
            break
        n += 1
Example #7
Source File: ale_data_set.py From Model-Free-Episodic-Control with MIT License | 6 votes |
def test_memory_usage_ok():
    import memory_profiler
    dataset = DataSet(width=80, height=80, rng=np.random.RandomState(42),
                      max_steps=100000, phi_length=4)
    last = time.time()

    for i in xrange(1000000000):
        if (i % 100000) == 0:
            print i
        dataset.add_sample(np.random.random((80, 80)), 1, 1, False)
        if i > 200000:
            imgs, actions, rewards, terminals = \
                dataset.random_batch(32)
        if (i % 10007) == 0:
            print time.time() - last
            mem_usage = memory_profiler.memory_usage(-1)
            print len(dataset), mem_usage
            last = time.time()
Example #8
Source File: test_wrappers.py From autograd with MIT License | 6 votes |
def checkpoint_memory():
    '''This test is meant to be run manually, since it depends on
    memory_profiler and its behavior may vary.'''
    try:
        from memory_profiler import memory_usage
    except ImportError:
        return

    def f(a):
        for _ in range(10):
            a = np.sin(a**2 + 1)
        return a
    checkpointed_f = checkpoint(f)

    def testfun(f, x):
        for _ in range(5):
            x = f(x)
        return np.sum(x)
    gradfun = grad(testfun, 1)

    A = npr.RandomState(0).randn(100000)
    max_usage = max(memory_usage((gradfun, (f, A))))
    max_checkpointed_usage = max(memory_usage((gradfun, (checkpointed_f, A))))

    assert max_checkpointed_usage < max_usage / 2.
Example #9
Source File: memwatcher.py From ipython_memwatcher with BSD 2-Clause "Simplified" License | 6 votes |
def watch_memory(self):
    if not self.watching_memory:
        return
    # calculate time delta using global t1 (from the pre-run
    # event) and current time
    self.time_delta = time.time() - self.t1
    new_memory_usage = memory_profiler.memory_usage()[0]
    self.memory_delta = new_memory_usage - self.previous_call_memory_usage
    self.keep_watching = False
    self.peaked_memory_usage = max(0, self.peak_memory_usage - new_memory_usage)
    num_commands = len(self.input_cells) - 1
    cmd = "In [{}]".format(num_commands)
    # convert the results into a pretty string
    output_template = ("{cmd} used {memory_delta:0.3f} MiB RAM in "
                       "{time_delta:0.3f}s, peaked {peaked_memory_usage:0.3f} "
                       "MiB above current, total RAM usage "
                       "{memory_usage:0.3f} MiB")
    output = output_template.format(
        time_delta=self.time_delta, cmd=cmd,
        memory_delta=self.memory_delta,
        peaked_memory_usage=self.peaked_memory_usage,
        memory_usage=new_memory_usage)
    print(str(output))
    self.previous_call_memory_usage = new_memory_usage
Example #10
Source File: memwatcher.py From ipython_memwatcher with BSD 2-Clause "Simplified" License | 6 votes |
def __init__(self):
    # keep a global accounting for the last known memory usage
    # which is the reference point for the memory delta calculation
    self.previous_call_memory_usage = memory_profiler.memory_usage()[0]
    self.t1 = time.time()  # will be set to current time later
    self.keep_watching = True
    self.peak_memory_usage = -1
    self.peaked_memory_usage = -1
    self.memory_delta = 0
    self.time_delta = 0
    self.watching_memory = True
    self.ip = get_ipython()
    self.input_cells = self.ip.user_ns['In']
    self._measurements = namedtuple(
        'Measurements',
        ['memory_delta', 'time_delta', 'memory_peak', 'memory_usage'],
    )
Example #11
Source File: ale_data_set.py From deep_q_rl with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_memory_usage_ok():
    import memory_profiler
    dataset = DataSet(width=80, height=80, rng=np.random.RandomState(42),
                      max_steps=100000, phi_length=4)
    last = time.time()

    for i in xrange(1000000000):
        if (i % 100000) == 0:
            print i
        dataset.add_sample(np.random.random((80, 80)), 1, 1, False)
        if i > 200000:
            imgs, actions, rewards, terminals = \
                dataset.random_batch(32)
        if (i % 10007) == 0:
            print time.time() - last
            mem_usage = memory_profiler.memory_usage(-1)
            print len(dataset), mem_usage
            last = time.time()
Example #12
Source File: profiler.py From SNIPER-mxnet with Apache License 2.0 | 5 votes |
def profile(func_to_profile):
    """
    Profiles the given func_to_profile for run time and memory consumption.

    Capable of profiling on both GPU and CPU machines. Uses the environment
    variable IS_GPU to decide whether to profile for CPU or GPU.

    returns: run_time, memory_usage
    """
    run_time = 0      # Seconds
    memory_usage = 0  # MBs

    # Choose nvidia-smi or memory_profiler for memory profiling on GPU and
    # CPU machines respectively.
    if IS_GPU:
        # Start time - For timing the runtime
        start_time = time.time()
        open('nvidia-smi-output.csv', 'a').close()
        gpu_monitor_process = subprocess.Popen(GPU_MONITOR_CMD, shell=True,
                                               preexec_fn=os.setsid)
        func_to_profile()
        end_time = time.time()
        os.killpg(os.getpgid(gpu_monitor_process.pid), signal.SIGTERM)
        run_time = end_time - start_time
        memory_usage = gpu_mem_profile('nvidia-smi-output.csv')
    else:
        # Start time - For timing the runtime
        start_time = time.time()
        memory_usage = cpu_memory_profile(func_to_profile)
        end_time = time.time()
        run_time = end_time - start_time
    return run_time, memory_usage
Example #13
Source File: test_base.py From phy with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_visual_benchmark(qtbot, vertex_shader_nohook, fragment_shader):
    try:
        from memory_profiler import memory_usage
    except ImportError:  # pragma: no cover
        logger.warning("Skip test depending on unavailable memory_profiler module.")
        return

    class TestCanvas(QOpenGLWindow):
        def paintGL(self):
            gloo.clear()
            program.draw('points')

    program = gloo.Program(vertex_shader_nohook, fragment_shader)

    canvas = TestCanvas()
    canvas.show()
    qtbot.waitForWindowShown(canvas)

    def f():
        for _ in range(100):
            program['a_position'] = (-1 + 2 * np.random.rand(100_000, 2)).astype(np.float32)
            canvas.update()
            qtbot.wait(1)

    mem = memory_usage(f)
    usage = max(mem) - min(mem)
    print(usage)

    # NOTE: this test is currently failing because of a memory leak in the
    # gloo module. Recreating a buffer at every cluster selection causes a
    # memory leak; one should ideally use a single large buffer and reuse it,
    # even if the buffer's content is actually smaller.
    # assert usage < 10

    canvas.close()
Example #14
Source File: test_memory_usage.py From metaworld with MIT License | 5 votes |
def mt50_usage():
    profile = {}
    for env_cls in ALL_V1_ENVIRONMENTS.values():
        target = (build_and_step, [env_cls], {})
        memory_usage = memory_profiler.memory_usage(target)
        profile[env_cls] = max(memory_usage)
    return profile
Example #15
Source File: test_memory_usage.py From metaworld with MIT License | 5 votes |
def test_avg_memory_usage():
    # average usage no greater than 60MB/env
    target = (build_and_step_all, [ALL_V1_ENVIRONMENTS.values()], {})
    usage = memory_profiler.memory_usage(target)
    average = max(usage) / len(ALL_V1_ENVIRONMENTS)
    assert average < 60
Example #16
Source File: test_memory_usage.py From metaworld with MIT License | 5 votes |
def test_from_task_memory_usage():
    target = (ML45.from_task, ['reach-v1'], {})
    usage = memory_profiler.memory_usage(target)
    assert max(usage) < 250
Example #17
Source File: profile_memory_usage.py From metaworld with MIT License | 5 votes |
def profile_hard_mode_indepedent():
    profile = {}
    for env_cls in HARD_MODE_LIST:
        target = (build_and_step, [env_cls], {})
        memory_usage = memory_profiler.memory_usage(target)
        profile[env_cls] = max(memory_usage)
    return profile
Example #18
Source File: profile_memory_usage.py From metaworld with MIT License | 5 votes |
def profile_hard_mode_shared():
    target = (build_and_step_all, [HARD_MODE_LIST], {})
    usage = memory_profiler.memory_usage(target)
    return max(usage)
Example #19
Source File: utils.py From learning-from-human-preferences with MIT License | 5 votes |
def profile_memory(log_path, pid):
    import memory_profiler

    def profile():
        with open(log_path, 'w') as f:
            # timeout=99999 is necessary because for external processes,
            # memory_usage otherwise defaults to only returning a single sample
            # Note that even with interval=1, because memory_profiler only
            # flushes every 50 lines, we still have to wait 50 seconds before
            # updates.
            memory_profiler.memory_usage(pid, stream=f,
                                         timeout=99999, interval=1)
    p = Process(target=profile, daemon=True)
    p.start()
    return p
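In stream mode memory_usage returns nothing useful; the samples land in the log file as they are taken. A sketch of reading them back, assuming the usual "MEM <MiB> <timestamp>" line format that mprof logs use:

with open(log_path) as f:
    samples = [float(line.split()[1])
               for line in f
               if line.startswith('MEM')]
print(max(samples))   # peak MiB observed for the external process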
Example #20
Source File: profiler.py From SNIPER-mxnet with Apache License 2.0 | 5 votes |
def cpu_memory_profile(func_to_profile):
    max_mem_usage = memory_usage(proc=(func_to_profile, ()), max_usage=True)
    return max_mem_usage[0]
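The trailing [0] assumes an older memory_profiler where max_usage=True returns a one-element list; newer releases return a bare float instead. A version-tolerant sketch (hypothetical helper, not part of the project above):

def cpu_memory_profile_compat(func_to_profile):
    # Handle both return conventions of max_usage=True.
    peak = memory_usage(proc=(func_to_profile, ()), max_usage=True)
    return peak[0] if isinstance(peak, list) else peak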
Example #21
Source File: pgp_utils.py From app with MIT License | 5 votes |
def encrypt_file(data: BytesIO, fingerprint: str) -> str:
    LOG.d("encrypt for %s", fingerprint)
    mem_usage = memory_usage(-1, interval=1, timeout=1)[0]
    LOG.d("mem_usage %s", mem_usage)

    # todo
    if mem_usage > 300:
        LOG.error("Force exit")
        hard_exit()

    r = gpg.encrypt_file(data, fingerprint, always_trust=True)

    if not r.ok:
        # maybe the fingerprint is not loaded on this host, try to load it
        mailbox = Mailbox.get_by(pgp_finger_print=fingerprint)
        if mailbox:
            LOG.d("(re-)load public key for %s", mailbox)
            load_public_key(mailbox.pgp_public_key)

            LOG.d("retry to encrypt")
            data.seek(0)
            r = gpg.encrypt_file(data, fingerprint, always_trust=True)

        if not r.ok:
            raise PGPException(f"Cannot encrypt, status: {r.status}")

    return str(r)
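Here memory_usage(-1, interval=1, timeout=1)[0] blocks for about a second just to obtain one RSS reading; psutil, already used in other examples on this page, can take a single reading immediately. A sketch of that alternative:

import psutil

# One immediate RSS reading for the current process, in MiB.
mem_usage = psutil.Process().memory_info().rss / 1024 ** 2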
Example #22
Source File: notebook_memory_management.py From azure-python-labs with MIT License | 5 votes |
def watch_memory():
    # bring in the global memory usage value from the previous iteration
    global previous_call_memory_usage, keep_watching, watching_memory, input_cells
    new_memory_usage = memory_profiler.memory_usage()[0]
    memory_delta = new_memory_usage - previous_call_memory_usage
    keep_watching = False
    total_memory = psutil.virtual_memory()[0] / 1024 / 1024  # in Mb
    # calculate time delta using global t1 (from the pre-run event) and current time
    time_delta_secs = time.time() - t1
    num_commands = len(input_cells) - 1
    cmd = "In [{}]".format(num_commands)
    # convert the results into a pretty string
    output_template = (
        "{cmd} used {memory_delta:0.4f} Mb RAM in "
        "{time_delta:0.2f}s, total RAM usage "
        "{memory_usage:0.2f} Mb, total RAM "
        "memory {total_memory:0.2f} Mb"
    )
    output = output_template.format(
        time_delta=time_delta_secs,
        cmd=cmd,
        memory_delta=memory_delta,
        memory_usage=new_memory_usage,
        total_memory=total_memory,
    )
    if watching_memory:
        print(str(output))
    previous_call_memory_usage = new_memory_usage
Example #23
Source File: mem_profile.py From QMusic with GNU Lesser General Public License v2.1 | 5 votes |
def cur_python_mem():
    mem_usage = memory_usage(-1, interval=0.2, timeout=1)
    return mem_usage
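With interval=0.2 and timeout=1 the call above blocks for about a second and returns roughly five readings; a usage sketch:

samples = cur_python_mem()
print(sum(samples) / len(samples))   # average MiB over the ~1 s window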
Example #24
Source File: ipython_memory_usage.py From ipython_memory_usage with BSD 2-Clause "Simplified" License | 5 votes |
def watch_memory():
    """Prints the memory usage if watching the memory"""
    # bring in the global memory usage value from the previous iteration
    global previous_call_memory_usage, peak_memory_usage, keep_watching, \
        watching_memory, input_cells
    new_memory_usage = memory_profiler.memory_usage()[0]
    memory_delta = new_memory_usage - previous_call_memory_usage
    keep_watching = False
    peaked_memory_usage = max(0, peak_memory_usage - new_memory_usage)
    # calculate time delta using global t1 (from the pre-run event) and current
    # time
    time_delta_secs = time.time() - t1
    num_commands = len(input_cells) - 1
    cmd = "In [{}]".format(num_commands)
    # convert the results into a pretty string
    output_template = ("{cmd} used {memory_delta:0.4f} MiB RAM in "
                       "{time_delta:0.2f}s, peaked {peaked_memory_usage:0.2f} "
                       "MiB above current, total RAM usage "
                       "{memory_usage:0.2f} MiB")
    output = output_template.format(time_delta=time_delta_secs,
                                    cmd=cmd,
                                    memory_delta=memory_delta,
                                    peaked_memory_usage=peaked_memory_usage,
                                    memory_usage=new_memory_usage)
    if watching_memory:
        print(str(output))
    previous_call_memory_usage = new_memory_usage
Example #25
Source File: ipython_memory_usage_perf.py From ipython_memory_usage with BSD 2-Clause "Simplified" License | 4 votes |
def watch_memory():
    import time
    # bring in the global memory usage value from the previous iteration
    global previous_call_memory_usage, peak_memory_usage, keep_watching, perf_proc, \
        watching_memory, input_cells
    #nbr_commands = len(In)
    new_memory_usage = memory_profiler.memory_usage()[0]
    memory_delta = new_memory_usage - previous_call_memory_usage
    keep_watching = False
    peaked_memory_usage = max(0, peak_memory_usage - new_memory_usage)
    # calculate time delta using global t1 (from the pre-run event) and current
    # time
    time_delta_secs = time.time() - t1
    perf_values = []
    if perf_proc:
        # pause if necessary to attempt to make sure we get a sample from perf...
        # as the 100ms min sample time and flushing oddness means I don't get a
        # sample very quickly for short-running tasks
        MIN_TIME_TO_GET_PERF_SAMPLE = 0.2
        if time_delta_secs < MIN_TIME_TO_GET_PERF_SAMPLE:
            print("PAUSING to get perf sample for {}s".format(MIN_TIME_TO_GET_PERF_SAMPLE))
            time.sleep(MIN_TIME_TO_GET_PERF_SAMPLE)  # pause until at least 0.1s has passed
        # if we have a valid perf running then capture that information
        perf_values = perf_process.finish_perf(perf_proc)
    cmd = ""  #In[nbr_commands-1]
    # convert the results into a pretty string
    #output_template = "'{cmd}' used {memory_delta:0.4f} MiB RAM in {time_delta:0.2f}s, peaked {peaked_memory_usage:0.2f} MiB above current, total RAM usage {memory_usage:0.2f} MiB"
    output_template = "Used {memory_delta:0.4f} MiB RAM in {time_delta:0.2f}s, peaked {peaked_memory_usage:0.2f} MiB above current, total RAM usage {memory_usage:0.2f} MiB"
    output = output_template.format(time_delta=time_delta_secs,
                                    cmd=cmd,
                                    memory_delta=memory_delta,
                                    peaked_memory_usage=peaked_memory_usage,
                                    memory_usage=new_memory_usage)
    print(str(output))
    if perf_values:
        perf_average = int(sum(perf_values) / float(time_delta_secs))
        #print("perf value for {} averages to {:,}/second, raw samples:".format(perf_process.EVENT_TYPE, perf_average), perf_values)
        print("perf value for {} averages to {:,}/second".format(perf_process.EVENT_TYPE, perf_average))
    else:
        print("perf - no results to report, possibly the collection time was too short?")
    previous_call_memory_usage = new_memory_usage