This article collects typical usage examples of Python's timeit.default_timer function. If you have been wondering what exactly default_timer does, how to call it, or how it is used in practice, the hand-picked examples below should help.
The following presents 20 code examples of default_timer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
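Before the examples, here is a minimal sketch of the pattern nearly all of them share: read default_timer() once before the work, once after, and subtract. On Python 3.3+, timeit.default_timer is an alias for time.perf_counter, the highest-resolution wall-clock timer available; the summation below is only a stand-in workload.

import timeit

start = timeit.default_timer()
total = sum(i * i for i in range(1000000))    # stand-in workload being timed
elapsed = timeit.default_timer() - start      # elapsed wall-clock seconds
print("computed %d in %.3f s" % (total, elapsed))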
Example 1: __download_range
def __download_range(self, k, dst):
    try:
        _, ext = os.path.splitext(dst)
        ds = []
        parts = []
        logging.info("Download %s start", k.name)
        for startByte in range(0, k.size, self.splitMB):
            output_part = self.new_temp_file(suffix=ext)
            parts.append(output_part)
            endByte = min(startByte + self.splitMB - 1, k.size)
            logging.debug(
                "deferToThreadPool %s start=%d end=%d size=%d cnt=%d",
                k.name,
                startByte,
                endByte,
                endByte - startByte,
                len(ds),
            )
            d = twisted.internet.threads.deferToThreadPool(
                reactor,
                reactor.getThreadPool(),  # @UndefinedVariable
                self.__downloadOne,
                k,
                startByte,
                endByte,
                output_part,
                len(ds),
            )
            ds.append(d)
        if os.path.exists(dst):
            os.remove(dst)
        fout = open(dst, "wb")  # open() replaces the Python 2 file() builtin
        start = timeit.default_timer()
        for cnt, p in enumerate(parts):
            yield ds[cnt]
            shutil.copyfileobj(open(p, "rb"), fout)
            size = min(k.size, (cnt + 1) * self.splitMB)
            elapsed = timeit.default_timer() - start
            speedstr = formatFileSize(size / elapsed)
            sizestr = formatFileSize(size)
            percent = (float(cnt) / len(parts)) * 100.0
            logging.info(
                "%03d/%03d (%.2f%%) speed=%s/s, elapsed=%.2f, size=%s",
                cnt,
                len(parts),
                percent,
                speedstr,
                elapsed,
                sizestr,
            )
    except Exception:
        logging.error("download error", exc_info=True)
        raise
Author: aubonbeurre | Project: pysugar | Lines: 60 | Source: s3utils.py
Example 2: __execEvent__
def __execEvent__(self, eventName, ntime, commandHandler):
    last = self.__events__[eventName]["lastExecTime"]
    timeInterval = self.__events__[eventName]["timeInterval"]
    if ntime - last >= timeInterval:
        start = default_timer()
        self.__events__[eventName]["function"](commandHandler, self.__events__[eventName]["channels"])
        timeTaken = default_timer() - start
        stats = self.__events__[eventName]["stats"]
        if stats["average"] is None:
            stats["average"] = timeTaken
            stats["min"] = timeTaken
            stats["max"] = timeTaken
        else:
            stats["average"] = (stats["average"] + timeTaken) / 2.0
            if timeTaken < stats["min"]:
                stats["min"] = timeTaken
            if timeTaken > stats["max"]:
                stats["max"] = timeTaken
        self.__events__[eventName]["lastExecTime"] = time.time()
Author: DarkMio | Project: Renol-IRC | Lines: 27 | Source: BotEvents.py
Example 3: main
def main():
    start_time = timeit.default_timer()
    proxies = []
    targets = ['http://www.google-proxy.net/', 'http://free-proxy-list.net/']
    for i in range(len(targets)):
        proxy = proxy_scraper(targets[i])
        for u in range(len(proxy)):
            proxy_found = str(proxy[u]['ip']) + ":" + str(proxy[u]['port'])
            if proxy_found not in proxies:
                if is_proxy_existed(proxy[u]['ip']) != True:
                    print(proxy[u]['ip'] + " - " + proxy[u]['port'] + " - " + proxy[u]['hostname'])
                    create_proxy(proxy[u]['ip'], proxy[u]['port'], proxy[u]['hostname'], proxy[u]['service'], proxy[u]['latitude'], proxy[u]['longitude'], proxy[u]['city'], proxy[u]['country'])
                    proxies.append(proxy_found)
    # save to a file
    file_name = "data_proxies.cfg"
    write_file(file_name, "\n".join(proxies))
    print("\n%s proxies found. File saved. You can find it under '%s'." % (len(proxies), file_name))
    # measure time
    print("\nElapsed time: %d sec" % (timeit.default_timer() - start_time))
Author: toanlk | Project: python | Lines: 25 | Source: proxies.py
Example 4: trim_data
def trim_data(crime_data, part, total_parts):
    print('Trimming unnecessary data...', end=' ')
    time1 = tm.default_timer()
    crime_data = crime_data[crime_data['YEAR'] >= 2006]
    crime_data = crime_data[crime_data['YEAR'] <= 2015]
    crime_data = crime_data[pd.notnull(crime_data['NEIGHBOURHOOD'])]
    crime_data = crime_data.drop('HUNDRED_BLOCK', axis=1)
    crime_data = crime_data.sort_index()
    if TEST_VAL:
        print('Taking subset of crime data (1000 row sample)...', end=' ')
        crime_data = crime_data.head(1005)
    if part is not None and total_parts is not None:
        start_index = int(1.0 * (part - 1) / total_parts * crime_data['YEAR'].count())
        end_index = int(1.0 * part / total_parts * crime_data['YEAR'].count())
        if part == total_parts:
            end_index = crime_data['YEAR'].count()
        crime_data = crime_data[start_index:end_index]
        print('Start index, end index, size:', start_index, end_index, crime_data['YEAR'].count())
    print('Finished')
    print('Time taken:', tm.default_timer() - time1, ' seconds\n')
    return crime_data
Author: wmaciel | Project: van-crime | Lines: 27 | Source: create_feature_vector.py
Example 5: read_features
def read_features(features):
    """
    Read all the features in the 'features' array and return a numpy array.
    Currently only computes the grand mean and std.
    """
    start = timeit.default_timer()
    x = []
    y = []
    for fn in glob.glob(os.path.join(FT_DIR, "*.npy")):
        # use separate names for the string indices so they do not clobber
        # the timer value stored in `start`
        start_idx = fn.rfind('/')
        end_idx = fn.rfind('.')
        ext = fn[start_idx + 1:end_idx]
        genre, _ = ext.split('_')
        data = np.load(fn)
        surface_ft = data[:-1]  # 5 features
        ft_vec = [np.mean(ft) for ft in surface_ft] + [np.std(ft) for ft in surface_ft]
        ceps = data[-1]  # mfcc features
        cep_len = len(ceps)
        ft_vec += np.mean(ceps[int(cep_len / 10.):int(cep_len * 9 / 10.)], axis=0).tolist()
        x.append(ft_vec)
        y.append(GENRE_DICT[genre])
    end = timeit.default_timer()
    print("reading all features takes: ", (end - start))
    return np.array(x), np.array(y)
Author: nwang57 | Project: genreClassifier | Lines: 27 | Source: features.py
Example 6: evaluate
def evaluate(im, algo, gt_illuminant, i, range_thresh, bin_num, dst_folder):
    new_im = None
    start_time = timeit.default_timer()
    if algo == "grayworld":
        new_im = cv2.xphoto.autowbGrayworld(im, 0.95)
    elif algo == "nothing":
        new_im = im
    elif algo == "learning_based":
        new_im = cv2.xphoto.autowbLearningBased(im, None, range_thresh, 0.98, bin_num)
    elif algo == "GT":
        gains = gt_illuminant / min(gt_illuminant)
        g1 = float(1.0 / gains[2])
        g2 = float(1.0 / gains[1])
        g3 = float(1.0 / gains[0])
        new_im = cv2.xphoto.applyChannelGains(im, g1, g2, g3)
    time = 1000 * (timeit.default_timer() - start_time)  # time in ms
    if len(dst_folder) > 0:
        if not os.path.exists(dst_folder):
            os.makedirs(dst_folder)
        im_name = ("%04d_" % i) + algo + ".jpg"
        cv2.imwrite(os.path.join(dst_folder, im_name), stretch_to_8bit(new_im))
    # recover the illuminant from the color balancing result, assuming the standard model:
    estimated_illuminant = [0, 0, 0]
    eps = 0.01
    estimated_illuminant[2] = np.percentile((im[:,:,0] + eps) / (new_im[:,:,0] + eps), 50)
    estimated_illuminant[1] = np.percentile((im[:,:,1] + eps) / (new_im[:,:,1] + eps), 50)
    estimated_illuminant[0] = np.percentile((im[:,:,2] + eps) / (new_im[:,:,2] + eps), 50)
    res = np.arccos(np.dot(gt_illuminant, estimated_illuminant) /
                    (np.linalg.norm(gt_illuminant) * np.linalg.norm(estimated_illuminant)))
    return (time, (res / np.pi) * 180)
Author: AryaPhilip | Project: opencv_contrib | Lines: 33 | Source: color_balance_benchmark.py
Example 7: spawn_runpy
def spawn_runpy(cp, wait=60, cb=check_rst):
    "as decorator to run job"
    global WAITQ, RUNQ, CFG
    pool = Pool(processes=CFG['MAXJOBS'])
    while len(WAITQ) > 0 or len(RUNQ) > 0:
        if len(RUNQ) <= CFG['MAXJOBS'] and len(WAITQ) > 0:
            path, test = WAITQ.pop()
            rst = pool.apply_async(call_runpy, (cp, path, test,))
            RUNQ.append((rst, test, timeit.default_timer()))
        else:
            for r in RUNQ:
                usec = float("%.2f" % (timeit.default_timer() - r[2]))
                # successful() is a method; referencing the bare attribute is
                # always truthy, so check ready() first and then call it
                if r[0].ready() and r[0].successful():
                    print("[{0}] success used {1} usec".format(r[1], usec))
                    RUNQ.remove(r)
                    if cb:
                        cb(r[1], 'pass', usec)
                else:
                    if usec > CFG['TIMEOUT']:
                        print("[{0}] unsuccess used timeout {1} usec".format(r[1], usec))
                        r[0].terminate()
                        if cb:
                            cb(r[1], 'fail', usec)
        time.sleep(float(wait))
Author: funningboy | Project: smtdv | Lines: 25 | Source: unittest.py
Example 8: load_indicators_to_mongo_zh
def load_indicators_to_mongo_zh(is_incremental):
    print("start loading indicator data(zh) from JSON file to MongoDB...")
    all_start = timeit.default_timer()
    static = Static()
    f = io.open(static.output_folder + '/worldbank_wdi_indicators_zh.json', 'r', encoding='utf8', errors='ignore')
    json_str = f.readline()
    indicator_array = json.loads(json_str)
    f.close()
    client = MongoClient(static.mongo_url, static.mongo_port)
    db = client[static.database_name]
    ## print(db.collection_names())
    indicator_col = db[static.indicator_col_name]
    if not is_incremental:
        indicator_col.drop()
    for ind in indicator_array:
        indicator_key = ind['id'].replace('.', '_') + '_ZH'
        data_type = 'number'
        if ind['name'].find('百分比') > -1:
            data_type = 'percentage'
        topics = []
        for topic in ind['topics']:
            topics.append(topic['value'])
        indicator_rec = {'indicator_key': indicator_key, 'original_id': ind['id'], 'indicator_text': ind['name'], 'data_type': data_type, 'sourceOrganization': ind['sourceOrganization'], 'sourceNote': ind['sourceNote'], 'topics': topics, 'data_source': '世界发展指标', 'dimension': [{'dimension_key': 'year', 'dimension_text': '年'}, {'dimension_key': 'region', 'dimension_text': '区域'}, {'dimension_key': 'country', 'dimension_text': '国家'}]}
        pk = indicator_col.insert(indicator_rec)
        print(indicator_key + ' ' + ind['name'] + ' inserted.')
    print("job is complete.")
    print("total records: " + str(indicator_col.count()))
    print("total time cost: " + str(round(timeit.default_timer() - all_start)) + 's')
Author: benitoro | Project: datanium | Lines: 28 | Source: data_load_worldbank_wdi.py
Example 9: main
def main():
    """
    """
    logging.info("Reading file:%s", "data/sample.avi")
    vid = AoRecording.AoRecording(filepath="data/sample.avi")
    vid.load_video()
    logging.info("Starting parallel processing")
    tic = timeit.default_timer()
    vid.filter_frames()
    vid.fixed_align_frames()
    vid.complete_align_parallel()
    vid.create_average_frame()
    vid.create_stdev_frame()
    toc = timeit.default_timer()
    print("Parallel Process took {}:".format(toc - tic))
    vid.create_stdev_frame()
    logging.info("writing output")
    vid.write_video("output/output_parallel.avi")
    vid.write_average_frame("output/lucky_average_parallel.png")
    vid.write_frame("output/lucky_stdev.png", "stdev")
    logging.info("Starting serial processing")
    tic = timeit.default_timer()
    vid.filter_frames()
    vid.fixed_align_frames()
    vid.complete_align()
    vid.create_average_frame()
    toc = timeit.default_timer()
    print("Serial Process took {}:".format(toc - tic))
    logging.info("writing output")
    vid.write_video("output/output_serial.avi")
    vid.write_frame("output/lucky_average_serial.png", "average")
Author: tomwright01 | Project: AO_Registration | Lines: 35 | Source: example.py
Example 10: _run_analyzers_on_event
def _run_analyzers_on_event(self):
    '''Run all analyzers on the current event, self.event.
    Returns a tuple (success?, last_analyzer_name).
    '''
    for i, analyzer in enumerate(self._analyzers):
        if not analyzer.beginLoopCalled:
            analyzer.beginLoop(self.setup)
        start = timeit.default_timer()
        # iEv is expected to be provided by the enclosing event loop in the original project
        if self.memReportFirstEvent >= 0 and iEv >= self.memReportFirstEvent:
            memNow = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            if memNow > self.memLast:
                print("Mem Jump detected before analyzer %s at event %s. RSS(before,after,difference) %s %s %s " % (analyzer.name, iEv, self.memLast, memNow, memNow - self.memLast))
            self.memLast = memNow
        ret = analyzer.process(self.event)
        if self.memReportFirstEvent >= 0 and iEv >= self.memReportFirstEvent:
            memNow = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            if memNow > self.memLast:
                print("Mem Jump detected in analyzer %s at event %s. RSS(before,after,difference) %s %s %s " % (analyzer.name, iEv, self.memLast, memNow, memNow - self.memLast))
            self.memLast = memNow
        if self.timeReport:
            self.timeReport[i]['events'] += 1
            if self.timeReport[i]['events'] > 0:
                self.timeReport[i]['time'] += timeit.default_timer() - start
        if ret == False:
            return (False, analyzer.name)
    return (True, analyzer.name)
Author: clementhelsens | Project: heppy | Lines: 26 | Source: looper.py
Example 11: load_rowdata_to_mongo_zh
def load_rowdata_to_mongo_zh(is_incremental):
    print("start loading row data(zh) from JSON file to MongoDB...")
    all_start = timeit.default_timer()
    static = Static()
    bydim_dir = static.output_folder + static.dataset_bydim_folder
    client = MongoClient(static.mongo_url, static.mongo_port)
    db = client[static.database_name]
    dataset_col = db[static.dataset_col_name]
    if not is_incremental:
        dataset_col.drop()
    file_path_array = []
    for idx, file in enumerate(os.listdir(bydim_dir)):
        file_path = os.path.join(bydim_dir, file)
        if os.path.isfile(file_path):
            file_path_array.append(file_path)
    print(str(len(file_path_array)) + " files are loaded")
    counter = []
    mapfunc = partial(insert_by_dim, counter=counter, dataset_col=dataset_col, all_start=all_start)
    pool = ThreadPool(12)
    pool.map(mapfunc, file_path_array)
    pool.close()
    pool.join()
    print("All the threads are completed. Total number is " + str(len(counter)) + "\n")
    print("total time cost: " + str(round(timeit.default_timer() - all_start)) + 's')
Author: benitoro | Project: datanium | Lines: 28 | Source: data_load_worldbank_wdi.py
Example 12: worker
def worker(F, chargers, sensors, p_list, sensors_p, p_list_p):
    """worker function, used to create processing"""
    result = {}
    tic = timeit.default_timer()
    anser = reconfiguration.iaa.solution(chargers, sensors, p_list, args['B'], sensors_p, p_list_p, F, args['p_min'])
    toc = timeit.default_timer()
    result['IAA'] = (toc - tic, anser)
    if DEBUG:
        print("=============================================")
        print("#               solution IAA                #")
        print("=============================================")
        pprint(anser)
    tic = timeit.default_timer()
    anser = solution.solutionOpt.solution(chargers, sensors_p, p_list_p)
    toc = timeit.default_timer()
    result['Opt'] = (toc - tic, anser)
    if DEBUG:
        print("=============================================")
        print("#               solution Opt                #")
        print("=============================================")
        pprint(anser)
    return result
Author: kongfy | Project: PPP | Lines: 25 | Source: extension1.py
Example 13: loop_sd_mean
def loop_sd_mean(alphabet):
    print("======== sd-mean test===========")
    start = timeit.default_timer()
    count = 0
    letters_number_list = []
    entropy_list = []
    for i in list(range(1, 101)):  # this is sd
        alphabet1 = eliminate_sd(alphabet, i)
        for j in list(range(1, 101)):  # this is mean
            alphabet2 = eliminate_mean(alphabet1, j)
            letters_number = len(alphabet2)
            letters_number_list.append((i, j, letters_number))
            balanced_alphabet = rebalance(alphabet2)
            entropy = calculate_entropy(balanced_alphabet)
            entropy_list.append((i, j, entropy))
            count = count + 1
            print(count)
    stop = timeit.default_timer()
    time = (stop - start)
    print(letters_number_list)
    print(entropy_list)
    print("======== sd-mean test===========")
    print('Running Time (s): %f' % time)
Author: fongchris | Project: timeseries | Lines: 28 | Source: old_info_gain.py
Example 14: analyze_files
def analyze_files(self, iterCount, loci_classes, adapt_threshold):
    Rmodel = VRmodel.VregMRmodel(iterCount, loci_classes, adapt_threshold)
    print("len(Rmodel.rfmodels)=", len(Rmodel.rfmodels))
    ofile = open("bkg_out.dat", "a+")
    Rmodel.set_bckgoutfile(ofile)
    for species in self.speciesList:
        fbar = self.S[species]["WGS"]
        print(fbar)
        outFile = self.outDir + os.path.basename(fbar).replace(".fasta", "_" + str(iterCount) + "_outRF.fasta")
        ofile = open(outFile, "w")
        Rmodel.set_outfile(ofile)
        fb = self.outDir + os.path.basename(fbar).replace(".fasta", "_" + str(iterCount) + "_exon.fasta")
        exfile1 = open(fb, "w")
        Rmodel.set_exon_outfiles(exfile1)
        start_time = timeit.default_timer()
        gene_cnt = 0
        for strand in [1, -1]:
            qbar = deepcopy(self.contigs)
            print("STRAND=", strand)
            for record in SeqIO.parse(fbar, "fasta"):
                if self.check_contigs:
                    if record.id.split("|")[3] not in self.contigs:
                        continue
                print("record.id=", record.id)
                print("cnts=", record.id.split("|")[3])
                print("qbar=", qbar)
                if self.check_contigs:
                    qbar.remove(record.id.split("|")[3])
                if strand == 1:
                    seq = record.seq
                else:
                    seq = record.seq.reverse_complement()
                Rmodel.set_record(record.id, record.name, record.description)
                seq_size = len(seq)
                res = self.mapper(divide_work(seq))
                """
                print("len(res)=", len(res))
                for ix in range(2):
                    print(res[ix][0], res[ix][1], type(res[ix][2]))
                """
                Elist = Rmodel.exon_MRprobabilities(res)
                gene_cnt = Rmodel.V_exon_model(gene_cnt, seq, strand, Elist)
                # res = None
                # Elist = None
                if len(qbar) == 0:
                    break
        ofile.close()
        elapsed = timeit.default_timer() - start_time
        print("ELAPSED TIME =", elapsed)
Author: dnolivieri | Project: MResVgene | Lines: 60 | Source: mrvPredict03.py
Example 15: runTestCode
def runTestCode(self):
    """
    This function ties into the debug menu. It is meant to allow execution
    of some test code. Feel free to change the contents of this function.
    """
    start = timeit.default_timer()
    monsters = []
    lib = Libraries.MonsterLibrary()
    stop = timeit.default_timer()
    time = stop - start
    print("Created library in " + str(time) + " seconds")
    for i in range(0, 10000):
        myRandom = lib.getRandomMonster(random.randint(0, 80))
        monsters.append(myRandom)
    # lib = Libraries.ItemLibrary()
    # myItem = lib.createItem('heal')
    # print(myItem)
    # myItem = lib.createItem('sword')
    # print(myItem)
    # myItem = lib.createItem('cloak')
    # print(myItem)
    # myItem = lib.createItem('fireball')
    # print(myItem)
    stop = timeit.default_timer()
    time = stop - start
    print("Created " + str(len(monsters)) + " monsters in " + str(time) + " seconds")
Author: kba | Project: advanced | Lines: 31 | Source: ApplicationLibtcod.py
Example 16: evaluate
def evaluate(self, p, sim, plt):
    start = timeit.default_timer()
    sim.run(p.T)
    end = timeit.default_timer()
    speed = p.T / (end - start)
    data = sim.data[self.p_ens]
    last = []
    for row in data.T:
        nz = np.nonzero(row > 0.05)[0]
        if len(nz) == 0:
            last.append(0)
        else:
            last.append(nz[-1])
    time_to_inhibit = np.array(last) * p.dt
    if plt:
        plt.plot(sim.trange(), sim.data[self.p_ens])
        for t in time_to_inhibit:
            plt.axvline(t)
        plt.axhline(0.05, linestyle='--', c='k')
        plt.xlabel('time (s) with increasing inhibition')
        plt.ylabel('decoded output')
    return dict(time_to_inhibit=np.mean(time_to_inhibit),
                speed=speed)
Author: tcstewar | Project: nengo_benchmarks | Lines: 27 | Source: inhibit.py
Example 17: test_exercise_6
def test_exercise_6(self):
    con = self.con
    con.isolation_level = None
    cur = con.cursor()
    N = 30000
    #############################
    # Exercise 6
    #
    # Change the following schema to include an index on column "a".
    cur.execute('CREATE TABLE "numbers" (a INTEGER)')
    #
    #
    #############################
    rows = []
    for i in range(0, N):
        rows.append((i,))
    cur.executemany('INSERT INTO "numbers" VALUES (?)', rows)
    start_time = timeit.default_timer()
    cur.execute('select min(a) from numbers')
    print("exercise_6: That took %f ms." % ((timeit.default_timer() - start_time) * 1000,))
    data = cur.fetchall()
    cur.close()
    self.assertTrue(data[0][0] == 0)
Author: Preetam | Project: intro-to-databases | Lines: 29 | Source: exercises.py
Example 18: pretrain
def pretrain(self, examples, epoch_counts, corruption_rates, learning_rates):
    """
    Trains the network for autoencoding on the given examples, given lists of
    epoch counts, corruption rates, and learning rates each equal in length to
    the number of layers in the stack.
    """
    tfs = self.get_training_functions(corruption_rates, learning_rates)
    indices = list(range(examples.get_value(borrow=True).shape[0]))
    start_time = timeit.default_timer()
    for i in range(len(self.layers)):
        # TODO: batches?
        for epoch in range(epoch_counts[i]):
            self.rng.shuffle(indices)
            costs = []
            for j in indices:
                cost = tfs[i](examples.get_value(borrow=True)[j].reshape(-1))
                costs.append(cost)
            debug(
                "... [{}] epoch {: 3d} at layer {: 2d} done {} ...".format(
                    str(datetime.timedelta(seconds=timeit.default_timer() - start_time)),
                    epoch + 1,
                    i,
                    "(min/avg cost {:0.3f}/{:0.3f})".format(
                        float(min(costs)),
                        float(sum(costs) / float(len(costs))),
                    )
                )
            )
Author: solsword | Project: fleece | Lines: 28 | Source: ngen.py
Example 19: launch_jobs
def launch_jobs(quandl_codes, num_workers, calc_date, authtoken="", freq='M', span=60):
    job_queue = Queue.Queue()
    for b in quandl_codes:
        job_queue.put(b)
    print("Length %d" % job_queue.qsize())
    thlist = []
    s_time = timeit.default_timer()
    fp = open("output.csv", "w")
    heading = "Ticker, Date, " + ",".join(Worker._itemlist) + "\n"
    fp.write(heading)
    s_date = dutil.shift_months(calc_date, -(span + 6))
    trim_start = s_date.strftime('%Y-%m-%d')
    trim_end = calc_date.strftime('%Y-%m-%d')
    calc_param = {"calc_date": calc_date, "freq": freq, "span": 60}
    for i in range(num_workers):
        th = Worker(job_queue, trim_start, trim_end, calc_param, authtoken, fp)
        th.daemon = True
        th.start()
        thlist.append(th)
    print("Finished launching jobs")
    e_time = timeit.default_timer()
    print("Time taken ", (e_time - s_time))
    # block until the queue is empty
    job_queue.join()
Author: gouthambs | Project: FinLib | Lines: 28 | Source: main.py
Example 20: train
def train(self, examples, cv_extract, epochs, learning_rate):
    """
    Specializes the network for prediction on the given examples, using the
    given center extract function, the given number of epochs, and the given
    learning rate.
    """
    input = T.vector(name="training_input", dtype=theano.config.floatX)
    tf = self.get_specialization_function(input, cv_extract, learning_rate)
    indices = list(range(examples.get_value(borrow=True).shape[0]))
    start_time = timeit.default_timer()
    # TODO: batches?
    for epoch in range(epochs):
        self.rng.shuffle(indices)
        costs = []
        for j in indices:
            cost = tf(examples.get_value(borrow=True)[j].reshape(-1))
            costs.append(cost)
        debug(
            "... [{}] epoch {: 3d} done {} ...".format(
                str(datetime.timedelta(seconds=timeit.default_timer() - start_time)),
                epoch + 1,
                "(min/avg cost {:0.3f}/{:0.3f})".format(
                    float(min(costs)),
                    float(sum(costs) / float(len(costs)))
                )
            )
        )
Author: solsword | Project: fleece | Lines: 27 | Source: ngen.py
Note: The timeit.default_timer examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.