This article collects and organizes typical usage examples of the memory_profiler.memory_usage function in Python. If you are wondering what memory_usage does, how to call it, or what real-world uses look like, the hand-picked examples below should help.
Twenty code examples of memory_usage are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
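Before the project code, here is a minimal self-contained sketch of the two call styles that recur throughout the examples: sampling an existing process by PID (-1 means the current process) and profiling a callable passed as a (func, args, kwargs) tuple. This sketch is not taken from any of the projects below; the allocate helper is invented for illustration, and the exact return type of max_usage=True differs between memory_profiler releases (older versions return a one-element list, newer ones a float).

from memory_profiler import memory_usage

def allocate(n):
    # throwaway work so the process actually allocates some memory
    return sum([i * 1.0 for i in range(n)])

# Sample the current process (-1) every 0.1 s for about 1 s:
# returns a list of readings in MiB.
samples = memory_usage(-1, interval=0.1, timeout=1)
print("current process (MiB):", samples)

# Profile a callable: pass a (func, args, kwargs) tuple and take the peak reading.
run_samples = memory_usage((allocate, (10**6,), {}), interval=0.1)
print("peak while running allocate():", max(run_samples), "MiB")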
Example 1: checkpoint_memory
# Excerpt from autograd's test suite; np, npr, grad and checkpoint come from
# the surrounding test module (autograd.numpy, autograd.numpy.random, autograd).
def checkpoint_memory():
    '''This test is meant to be run manually, since it depends on
    memory_profiler and its behavior may vary.'''
    try:
        from memory_profiler import memory_usage
    except ImportError:
        return

    def f(a):
        for _ in range(10):
            a = np.sin(a**2 + 1)
        return a
    checkpointed_f = checkpoint(f)

    def testfun(f, x):
        for _ in range(5):
            x = f(x)
        return np.sum(x)
    gradfun = grad(testfun, 1)

    A = npr.RandomState(0).randn(100000)
    max_usage = max(memory_usage((gradfun, (f, A))))
    max_checkpointed_usage = max(memory_usage((gradfun, (checkpointed_f, A))))
    assert max_checkpointed_usage < max_usage / 2.
Developer: RaoJun06 | Project: autograd | Lines: 25 | Source: test_wrappers.py
Example 2: trace_calls_and_returns
# Python 2 excerpt: a sys.settrace-style hook that logs memory usage around
# function call and return events.
def trace_calls_and_returns(frame, event, arg):
    global just_returned
    co = frame.f_code
    func_name = co.co_name
    if just_returned:
        just_returned = False
        print 'just_returned'
        print 'mem before gc ' + str(memory_usage(-1))
        # gc.collect()
        print 'mem after gc ' + str(memory_usage(-1))
    if func_name == 'memory_usage':
        return
    if func_name == 'write':
        # Ignore write() calls from print statements
        return
    line_no = frame.f_lineno
    filename = co.co_filename
    if event == 'call':
        print 'Call to %s on line %s of %s' % (func_name, line_no, filename)
        return trace_calls_and_returns
    elif event == 'return':
        print 'return of %s => %s' % (func_name, arg)
        just_returned = True
        print 'mem before gc ' + str(memory_usage(-1))
        # gc.collect()
        print 'mem after gc ' + str(memory_usage(-1))
    elif event == 'line':
        print 'a line event ' + str(line_no)
    return
Developer: 1bitaway | Project: pycallgraph | Lines: 32 | Source: test.py
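Example 2's hook only takes effect once it is installed as the global trace function. The short harness below is a hypothetical reconstruction (not part of the pycallgraph test, and run under Python 2 to match the excerpt's print statements) showing how such a hook is typically wired up with sys.settrace; the work() function and the initial just_returned flag are assumptions for illustration.

import sys
from memory_profiler import memory_usage

just_returned = False  # global flag consumed by trace_calls_and_returns

def work():
    # hypothetical workload whose calls and returns get traced
    return [x * x for x in range(100000)]

sys.settrace(trace_calls_and_returns)
try:
    work()
finally:
    sys.settrace(None)  # always uninstall the hook when done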
Example 3: assertNotIncreasingMemory
def assertNotIncreasingMemory(self,
                              f,
                              num_iters=100000,
                              increase_threshold_absolute_mb=10):
    """Assert memory usage doesn't increase beyond given threshold for f."""
    with context.eager_mode():
        # Warm up.
        f()
        # Wait for background threads to start up and take over memory.
        # FIXME: The nature of this test leaves few other options. Maybe there
        # is a better way to do this.
        time.sleep(4)
        initial = memory_profiler.memory_usage(-1)[0]
        for _ in six.moves.range(num_iters):
            f()
        increase = memory_profiler.memory_usage(-1)[0] - initial
        assert increase < increase_threshold_absolute_mb, (
            "Increase is too high. Initial memory usage: %f MB. Increase: %f MB. "
            "Maximum allowed increase: %f") % (initial, increase,
                                               increase_threshold_absolute_mb)
Developer: aritratony | Project: tensorflow | Lines: 26 | Source: memory_test.py
Example 4: memory_profile
def memory_profile(function, *args, **kwargs):
    gc.collect()
    baseline = memory_profiler.memory_usage()[0]
    max_usage = memory_profiler.memory_usage(
        (function, args, kwargs),
        max_usage=True,
    )
    return max_usage[0] - baseline
Developer: ChinaQuants | Project: high_performance_python | Lines: 8 | Source: iter_vs_list_comprehension.py
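A hypothetical call to the helper above (not part of the book's source) could look like the following; build_list is invented for illustration, and the returned figure is the extra MiB the call needed beyond the process baseline. Note that the helper indexes max_usage with [0], which assumes a memory_profiler version where max_usage=True returns a one-element list.

def build_list():
    return list(range(5 * 10**6))

print("building the list cost %.1f MiB" % memory_profile(build_list))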
Example 5: run_tests_if_main
def run_tests_if_main(measure_mem=False):
    """Run tests in a given file if it is run as a script."""
    local_vars = inspect.currentframe().f_back.f_locals
    if not local_vars.get('__name__', '') == '__main__':
        return
    # we are in a "__main__"
    try:
        import faulthandler
        faulthandler.enable()
    except Exception:
        pass
    with warnings.catch_warnings(record=True):  # memory_usage internal dep.
        mem = int(round(max(memory_usage(-1)))) if measure_mem else -1
    if mem >= 0:
        print('Memory consumption after import: %s' % mem)
    t0 = time.time()
    peak_mem, peak_name = mem, 'import'
    max_elapsed, elapsed_name = 0, 'N/A'
    count = 0
    for name in sorted(list(local_vars.keys()), key=lambda x: x.lower()):
        val = local_vars[name]
        if name.startswith('_'):
            continue
        elif callable(val) and name.startswith('test'):
            count += 1
            doc = val.__doc__.strip() if val.__doc__ else name
            sys.stdout.write('%s ... ' % doc)
            sys.stdout.flush()
            try:
                t1 = time.time()
                if measure_mem:
                    with warnings.catch_warnings(record=True):  # dep warn
                        mem = int(round(max(memory_usage((val, (), {})))))
                else:
                    val()
                    mem = -1
                if mem >= peak_mem:
                    peak_mem, peak_name = mem, name
                mem = (', mem: %s MB' % mem) if mem >= 0 else ''
                elapsed = int(round(time.time() - t1))
                if elapsed >= max_elapsed:
                    max_elapsed, elapsed_name = elapsed, name
                sys.stdout.write('time: %0.3f sec%s\n' % (elapsed, mem))
                sys.stdout.flush()
            except Exception as err:
                if 'skiptest' in err.__class__.__name__.lower():
                    sys.stdout.write('SKIP (%s)\n' % str(err))
                    sys.stdout.flush()
                else:
                    raise
    elapsed = int(round(time.time() - t0))
    sys.stdout.write('Total: %s tests\n• %0.3f sec (%0.3f sec for %s)\n• '
                     'Peak memory %s MB (%s)\n'
                     % (count, elapsed, max_elapsed, elapsed_name, peak_mem,
                        peak_name))
Developer: palday | Project: mne-python | Lines: 55 | Source: testing.py
Example 6: watch_memory
def watch_memory():
    # bring in the global memory usage value from the previous iteration
    global previous_call_memory_usage, peak_memory_usage, keep_watching, \
        watching_memory, input_cells
    new_memory_usage = memory_profiler.memory_usage()[0]
    memory_delta = new_memory_usage - previous_call_memory_usage
    keep_watching = False
    peaked_memory_usage = max(0, peak_memory_usage - new_memory_usage)
    # calculate time delta using global t1 (from the pre-run event) and current
    # time
    time_delta_secs = time.time() - t1
    num_commands = len(input_cells) - 1
    cmd = "In [{}]".format(num_commands)
    # convert the results into a pretty string
    output_template = ("{cmd} used {memory_delta:0.4f} MiB RAM in "
                       "{time_delta:0.2f}s, peaked {peaked_memory_usage:0.2f} "
                       "MiB above current, total RAM usage "
                       "{memory_usage:0.2f} MiB")
    output = output_template.format(time_delta=time_delta_secs,
                                    cmd=cmd,
                                    memory_delta=memory_delta,
                                    peaked_memory_usage=peaked_memory_usage,
                                    memory_usage=new_memory_usage)
    if watching_memory:
        print(str(output))
    previous_call_memory_usage = new_memory_usage
Developer: Elijahwexx | Project: ipython_memory_usage | Lines: 26 | Source: ipython_memory_usage.py
Example 7: watch_memory
def watch_memory(self):
    if not self.watching_memory:
        return
    # calculate time delta using global t1 (from the pre-run
    # event) and current time
    self.time_delta = time.time() - self.t1
    new_memory_usage = memory_profiler.memory_usage()[0]
    self.memory_delta = new_memory_usage - self.previous_call_memory_usage
    self.keep_watching = False
    self.peaked_memory_usage = max(0, self.peak_memory_usage - new_memory_usage)
    num_commands = len(self.input_cells) - 1
    cmd = "In [{}]".format(num_commands)
    # convert the results into a pretty string
    output_template = ("{cmd} used {memory_delta:0.3f} MiB RAM in "
                       "{time_delta:0.3f}s, peaked {peaked_memory_usage:0.3f} "
                       "MiB above current, total RAM usage "
                       "{memory_usage:0.3f} MiB")
    output = output_template.format(
        time_delta=self.time_delta,
        cmd=cmd,
        memory_delta=self.memory_delta,
        peaked_memory_usage=self.peaked_memory_usage,
        memory_usage=new_memory_usage)
    print(str(output))
    self.previous_call_memory_usage = new_memory_usage
Developer: enascimento | Project: ipython_memwatcher | Lines: 25 | Source: memwatcher.py
Example 8: run_example
def run_example(self, theInput, theOutput):
    # Import the program (load_module is deprecated since 3.4, but there is
    # no other way at the moment)
    from importlib.machinery import SourceFileLoader
    solution = SourceFileLoader("solution", self.programPath).load_module()
    # Feed the input
    with PatchStd(theInput) as std:
        # Start time counter
        startTime = time.time()
        # Run the program
        solution.main()
        # Get end time
        endTime = time.time() - startTime
        # Get memory (includes the current test's ~14MB, but is roughly that)
        mem = memory_usage(max_usage=True)
        # Check output
        actual_output = std.getStdOut().getvalue()
        self.assertEqual(actual_output, theOutput)
        # Print time (not done before because the output is not yet retrieved)
        std.restore()
        print("\tTime: %.3f sec" % endTime)
        print("\tMemory: %.3f MB" % mem)
        # Show errors if any
        errors = std.getStdErr().getvalue()
        if errors != '':
            print("\t" + errors)
Developer: aetel | Project: ieeextreme9 | Lines: 32 | Source: xtremetests.py
Example 9: test_memory_leak
def test_memory_leak():
    """https://github.com/emcconville/wand/pull/127"""
    minimum = 1.0
    with Color('NONE') as nil_color:
        minimum = ctypes.sizeof(nil_color.raw)
    consumes = memory_usage((color_memory_leak, (), {}))
    assert consumes[-1] - consumes[0] <= minimum
Developer: dahlia | Project: wand | Lines: 7 | Source: color_test.py
Example 10: test
# Assumes module-level globals from the original script: start (start time)
# and close_interval (how often to report progress).
def test(prefix, item_ct, loop_ct):
    h5 = h5py.File(sys.argv[1], "a")
    cc = h5.id.get_mdc_config()
    print(cc.max_size)
    print(h5.id.get_access_plist().get_cache())
    cc.min_size = 1024 * 1024
    cc.max_size = 1024 * 1024
    h5.id.set_mdc_config(cc)
    # h5.create_group(b'\xff')  # force new compact-or-indexed group with high byte
    # del h5[b'\xff']
    print("start %s %.3f" % (prefix, time.time() - start))
    path_i = 0
    for i in range(loop_ct):
        data = np.zeros(item_ct, dtype=np.float32)
        i_str = "%05d" % i
        # path = ''
        # path = '/'.join((prefix, i_str[0], i_str[1], i_str[2], i_str[3]))
        path = str(path_i)
        # print(path + '/' + str(i))
        ds = h5.create_dataset(path + "/" + str(i), data=data)
        # ds = h5.create_dataset(path + '/' + str(i), (item_ct,), dtype=np.float32)
        # ds[:] = data
        # h5.create_group(path + '/' + i_str)
        if i != 0 and i % close_interval == 0:
            path_i += 1
            # print(i, '%.1f' % (time.time() - start), h5.id.get_mdc_size(), h5.id.get_mdc_hit_rate())
            print(
                i,
                "%.1f" % (time.time() - start),
                h5.id.get_mdc_size(),
                h5.id.get_mdc_hit_rate(),
                memory_profiler.memory_usage(-1),
            )
    print("end %s %.3f" % (prefix, time.time() - start))
    h5.close()
Developer: sanketsharma411 | Project: quac | Lines: 35 | Source: h5leak.py
Example 11: memorySummary
def memorySummary():
    global memLog
    global memList
    global memMax
    if memLog:
        gc.collect()
        try:
            # note: the source reads "from memory_profile import memory_usage",
            # which would always raise ImportError; the module is memory_profiler
            from memory_profiler import memory_usage
            mem = memory_usage(-1)[0]
        except:
            raise
        # except:
        #     try:
        #         mem = int(os.popen('ps -p %d -o vsz|tail -1' % os.getpid()).read())/1024.0
        #     except:
        #         try:
        #             # CLE doesn't have full ps functionality
        #             # try to find first occurence of pid, then get memory usage slot
        #             # mwf debug
        #             # import pdb
        #             # pdb.set_trace()
        #             plist = os.popen('ps ').read()
        #             pr = plist.split('%s' % os.getpid())
        #             # print 'pr = %s ' % pr
        #             # print 'pr[1] = %s ' % pr[1]
        #             mem = int(pr[1].split()[1])/1024.0
        #         except:
        #             logEvent("memory function doesn't work on this platform\n")
        #             mem = 0
        if mem > 0:
            for pair in memList:
                # Python 2 backquotes are repr(); logs each recorded value as a
                # percentage of the peak memory use
                logEvent(`pair[0]` + " %" + `100.0*pair[1]/memMax`)
Developer: JWW81 | Project: proteus | Lines: 32 | Source: Profiling.py
Example 12: watch_memory
def watch_memory():
    import time
    # bring in the global memory usage value from the previous iteration
    global previous_call_memory_usage, peak_memory_usage, keep_watching
    nbr_commands = len(In)
    new_memory_usage = memory_profiler.memory_usage()[0]
    memory_delta = new_memory_usage - previous_call_memory_usage
    keep_watching = False
    peaked_memory_usage = max(0, peak_memory_usage - new_memory_usage)
    # calculate time delta using global t1 (from the pre-run event) and current
    # time
    time_delta_secs = time.time() - t1
    cmd = In[nbr_commands - 1]
    # convert the results into a pretty string
    output_template = ("'{cmd}' used {memory_delta:0.4f} MiB RAM in "
                       "{time_delta:0.2f}s, peaked {peaked_memory_usage:0.2f} "
                       "MiB above current, total RAM usage "
                       "{memory_usage:0.2f} MiB")
    output = output_template.format(
        time_delta=time_delta_secs,
        cmd=cmd,
        memory_delta=memory_delta,
        peaked_memory_usage=peaked_memory_usage,
        memory_usage=new_memory_usage,
    )
    print(str(output))
    previous_call_memory_usage = new_memory_usage
Developer: Sandy4321 | Project: ipython_memory_usage | Lines: 25 | Source: ipython_memory_usage.py
Example 13: f
# Python 2 excerpt: child-process side of a copy-on-write demonstration.
def f(a, v):
    print "running", os.getpid(), len(a)
    print "in process", memory_profiler.memory_usage()
    #a[0] = 'x'
    print "did arr get changed in process?", a[:5]
    v.value = len(a)
    time.sleep(5)
Developer: ChinaQuants | Project: high_performance_python | Lines: 7 | Source: cow1.py
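The function above is only one half of the copy-on-write demonstration; the driver below is a hypothetical reconstruction (not copied from cow1.py, kept in the same Python 2 style) that creates a large object in the parent and hands it to a child process together with a shared Value that the child fills in.

import multiprocessing
import os
import time  # used by f() above
import memory_profiler

if __name__ == '__main__':
    a = ['hello'] * 10**7              # large object owned by the parent
    v = multiprocessing.Value('l', 0)  # shared integer the child writes to
    print "parent", os.getpid(), memory_profiler.memory_usage()
    p = multiprocessing.Process(target=f, args=(a, v))
    p.start()
    p.join()
    print "child reported len", v.value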
Example 14: __init__
def __init__(self, method, n=1, timeout=120.0):
    from memory_profiler import memory_usage
    import time
    from util import avg
    self.timeout = timeout
    max_memory_list = []
    avg_memory_list = []
    time_list = []
    for i in range(n):
        runner = self._Runner(memory_usage, {'proc': method, 'interval': 0.01, 'retval': True}, timeout)
        baseline = avg(memory_usage((lambda: None, ())))  # baseline memory usage measurement
        start_time = time.time()
        runner.run()  # actual running of the method
        end_time = time.time()
        if runner.result is not None:
            mem_use, self.result = runner.result
            memory = map(lambda x: x - baseline, mem_use)
        else:  # there was a timeout
            import sys
            memory = [-sys.maxint]
            self.result = None
        max_memory_list.append(max(memory))
        avg_memory_list.append(avg(memory))
        time_list.append((end_time - start_time) * 1000)
    self.max_memory = avg(max_memory_list)
    self.avg_memory = avg(avg_memory_list)
    self.time = avg(time_list)
Developer: KDercksen | Project: SPML | Lines: 35 | Source: profiler.py
Example 15: custom_minimize
def custom_minimize(function, algorithm, bounds=None, guess=None):

    def iter_minimize():  # lightweight version of iter_minimize for a single optimization method
        start = timeit.default_timer()
        result = 0
        # some minimization techniques do not require an initial guess
        if guess is not None:
            result = algorithm(function, guess)
        else:
            result = algorithm(function, bounds)
        iterations = -1
        if 'nit' in result.keys():
            iterations = result.get('nit')
        stop = timeit.default_timer()
        return iterations, start, stop, result

    # tracks amount of memory used by the current process (-1) every interval (.2 seconds)
    current_memory = memory_usage(-1, interval=.2)
    # most_mem appears to be a module-level array in the original test script
    most_mem = np.append(most_mem, max(current_memory))
    num_iters, start, stop, result = iter_minimize()
    exec_time = stop - start
    print '{0} took {1} seconds. The result, {2} was found at ({3})'.format(algorithm.__name__, exec_time, result.fun, result.x)
    print '{0} used {1} megabytes and took {2} iterations'.format(algorithm.__name__, most_mem, num_iters)
    print
Developer: ednusi | Project: MatPy | Lines: 35 | Source: _test_suite_July_05_2016.py
Example 16: run
def run(self):
    jobs = Job.objects.due()
    if self.do_profile:
        try:
            import memory_profiler
            prof_string = "[%8.2f MB] " % memory_profiler.memory_usage()[0]
        except ImportError:
            prof_string = "No profiler found"
    else:
        prof_string = ""
    if jobs:
        logging.info(
            "%sRunning %d due jobs... (%s)"
            % (prof_string, jobs.count(), ", ".join(['"%s"' % job.name for job in jobs]))
        )
        for job in Job.objects.due():
            job.run()
    else:
        logging.debug("%sNo jobs due to run." % prof_string)
    if self.do_gc:
        gc.collect()
Developer: prpankajsingh | Project: ka-lite | Lines: 25 | Source: cronserver.py
Example 17: during_execution_memory_sampler
def during_execution_memory_sampler():
    import time
    import memory_profiler
    global keep_watching, peak_memory_usage
    peak_memory_usage = -1
    keep_watching = True

    n = 0
    WAIT_BETWEEN_SAMPLES_SECS = 0.001
    MAX_ITERATIONS = 60.0 / WAIT_BETWEEN_SAMPLES_SECS
    while True:
        mem_usage = memory_profiler.memory_usage()[0]
        peak_memory_usage = max(mem_usage, peak_memory_usage)
        time.sleep(WAIT_BETWEEN_SAMPLES_SECS)
        if not keep_watching or n > MAX_ITERATIONS:
            # exit if we've been told our command has finished or if it has run
            # for more than a sane amount of time (e.g. maybe something crashed
            # and we don't want this to carry on running)
            if n > MAX_ITERATIONS:
                print("{} SOMETHING WEIRD HAPPENED AND THIS RAN FOR TOO LONG, "
                      "THIS THREAD IS KILLING ITSELF".format(__file__))
            break
        n += 1
Developer: Sandy4321 | Project: ipython_memory_usage | Lines: 27 | Source: ipython_memory_usage.py
Example 18: time_svd
def time_svd(svdfunc, N1, N2, f, rseed=0, bestof=3, args=None, matfunc=np.asarray, **kwargs):
    if args is None:
        args = ()
    N1_N2_f = np.broadcast(N1, N2, f)
    times = []
    memory = []
    for (N1, N2, f) in N1_N2_f:
        M = sparse_matrix(N1, N2, f, matfunc, rseed)
        t_best = np.inf
        mem_best = np.inf
        for i in range(bestof):
            t0 = time()
            if args:
                _args = [M]
                _args.extend(list(args))
            else:
                _args = (M,)
            mem_usage = max(memory_usage((svdfunc, _args, kwargs)))
            t1 = time()
            t_best = min(t_best, t1 - t0)
            mem_best = min(mem_best, mem_usage)
        times.append(t_best)
        memory.append(mem_best)
    return np.array(times).reshape(N1_N2_f.shape), np.array(memory).reshape(N1_N2_f.shape)
Developer: joshloyal | Project: ml-benchmarks | Lines: 28 | Source: test_svd.py
Example 19: leak
def leak():
    data = [pa.array(np.concatenate([np.random.randn(100000)] * 1000))]
    table = pa.Table.from_arrays(data, ['foo'])
    while True:
        print('calling to_pandas')
        print('memory_usage: {0}'.format(memory_profiler.memory_usage()))
        table.to_pandas()
        gc.collect()
Developer: hdfeos | Project: arrow | Lines: 8 | Source: test_leak.py
Example 20: Map
def Map(L):
    results = {}  # key value storage
    for line in L:
        key = str(line[0] + ":" + line[1] + ":" + line[2])
        try:
            results[key] += 1
        except KeyError:
            results[key] = 1
    return results, [multiprocessing.current_process().name, memory_usage(-1, interval=.0001, timeout=.0001).pop()], [sys.getsizeof(L), sys.getsizeof(results)]
Developer: 2XL | Project: python-multiprocessing | Lines: 9 | Source: test.py
Note: The memory_profiler.memory_usage examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not reproduce this compilation without permission.