This article collects typical usage examples of the `utils.logger` function in Python. If you have been wondering what `logger` does, how it is called, and what real-world uses look like, the hand-picked code examples below should help.
Twenty code examples of the `logger` function are shown below, ordered by popularity by default.
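These snippets come from several unrelated projects, so each one imports its own `utils` module, and the actual `logger` implementation is not reproduced on this page. In most of the examples it behaves like a thin wrapper that formats its arguments and writes them to stderr or a log file. A minimal stand-in, under that assumption only, could look like this:

import sys

def logger(*args):
    """Illustrative stand-in for utils.logger; the real implementations differ per project.

    Joins all arguments into one message and writes it to stderr, adding a
    newline if the caller did not include one.
    """
    message = ' '.join(str(a) for a in args)
    sys.stderr.write(message if message.endswith('\n') else message + '\n')
    sys.stderr.flush()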
Example 1: fixfilebase
def fixfilebase(self, f):
assert f.startswith(self.expectedpath), [f, self.expectedpath]
f = self.newpath + f[len(self.expectedpath):]
schemafile = f.split(':')[-1].split('#')[0]
if not cmn.fexists(schemafile):
logger('#err ...schema file {0} not found\n'.format(schemafile))
return f
Author: IHEC | Project: ihec-ecosystems | Lines: 7 | Source: validate_json.py
Example 2: main
def main(args):
if not args.has('-config'):
args.add_key('-config', "./config.json")
logger(str([args.keys, args.args()]) + '\n')
if not args.has('-out'):
logger('#__noOutFileGiven___\n')
return
if not args.has('-dbg') and (cmn.fexists(args['-out']) and not args.has('-overwrite-outfile')):
logger('#__outfile:{0} exists\n'.format(args['-out']))
return
    try:
if args.has('-extract'):
import sraparse
return sraparse.SRAParseObjSet.extract_attributes_to_json(args.args())
elif args.has("-test-sample"):
testargs = ["./examples/samples.xml", "-config:{0}".format(args['-config']), "-out:./examples/samples.versioned.xml"]
validate_sample.main(Config(testargs))
elif args.has("-sample"):
validate_sample.main(args)
elif args.has("-experiment"):
validate_experiment.main(args)
else:
raise NotImplementedError("#__unknownArguments__")
    except Exception as err:
        logger('#__unexpected__\n')
        logger(str(err) + '\n')
Author: IHEC | Project: ihec-ecosystems | Lines: 33 | Source: __main__.py
Example 3: E
def E(level=1):
if level == 0:
from common import level1 as P
P = partial(P, FOnly=True) # high order function, here we only test LEVEL-1 F CNN
elif level == 1:
from level import level1 as P
elif level == 2:
from level import level2 as P
else:
from level import level3 as P
data = getDataFromTxt(TXT)
error = np.zeros((len(data), 5))
for i in range(len(data)):
imgPath, bbox, landmarkGt = data[i]
img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
assert(img is not None)
logger("process %s" % imgPath)
landmarkP = P(img, bbox)
        plot_point[i] = landmarkP  # NOTE: plot_point is not defined in this snippet; it must be provided elsewhere in test.py
# real landmark
landmarkP = bbox.reprojectLandmark(landmarkP)
landmarkGt = bbox.reprojectLandmark(landmarkGt)
error[i] = evaluateError(landmarkGt, landmarkP, bbox)
return error
Author: NUAAXXY | Project: CNN-FaceKeyPoint-Detection | Lines: 27 | Source: test.py
Example 4: start
def start(self):
# self.start.__doc__ = self._thread.start.__doc__
if not self.started:
self.started = True
self.return_value = self.target(*self.args, **self.kwargs)
logger('fake_thread_started', self.target.__name__)
else:
raise RuntimeError()
Author: njittam | Project: bot | Lines: 8 | Source: command_handler.py
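The `FakeThread` here mimics the `threading.Thread` interface but executes its target synchronously as soon as `start()` is called. Its constructor is not shown in this snippet, so the following usage sketch only infers its shape from the attributes that `start()` touches (`target`, `args`, `kwargs`, `started`, `return_value`); the imports of `FakeThread` and `logger` from the project's modules are assumed.

# Inferred usage sketch; the real FakeThread constructor lives elsewhere in
# command_handler.py and may differ in detail.
def greet(name):
    logger('greeting', name)
    return 'hello ' + name

fake = FakeThread(target=greet, args=('world',), kwargs={})
fake.start()                 # runs greet() immediately in the calling thread
print(fake.return_value)     # -> 'hello world'
# calling fake.start() a second time raises RuntimeError, mirroring threading.Thread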
Example 5: obj_id
def obj_id(self, e):
try:
idblock = e.get('@idblock', dict())
tags = [idblock[k] for k in ['alias', 'refname', 'accession'] if k in idblock]
return 'unknown' if not tags else self.sanitizer.filter_alphan('.'.join(tags), '.-_')
except Exception as e:
logger('#__couldNotExactId__:{0}\n'.format(e ))
return 'unknown'
Author: IHEC | Project: ihec-ecosystems | Lines: 8 | Source: validate_json.py
Example 6: get_words
def get_words(terminals, landmarks, rel=None):
words = []
probs = []
entropy = []
for n,lmk in zip(terminals, landmarks):
# if we could not get an expansion for the LHS, we just pass down the unexpanded nonterminal symbol
# it gets the probability of 1 and entropy of 0
if n in NONTERMINALS:
words.append(n)
probs.append(1.0)
entropy.append(0.0)
continue
lmk_class = (lmk.object_class if lmk else None)
lmk_color = (lmk.color if lmk else None)
rel_class = rel_type(rel)
dist_class = (rel.measurement.best_distance_class if hasattr(rel, 'measurement') else None)
deg_class = (rel.measurement.best_degree_class if hasattr(rel, 'measurement') else None)
cp_db = CWord.get_word_counts(pos=n,
lmk_class=lmk_class,
lmk_ori_rels=get_lmk_ori_rels_str(lmk),
lmk_color=lmk_color,
rel=rel_class,
rel_dist_class=dist_class,
rel_deg_class=deg_class)
if cp_db.count() <= 0:
logger( 'Could not expand %s (lmk_class: %s, lmk_color: %s, rel: %s, dist_class: %s, deg_class: %s)' % (n, lmk_class, lmk_color, rel_class, dist_class, deg_class) )
terminals.append( n )
continue
logger( 'Expanded %s (lmk_class: %s, lmk_color: %s, rel: %s, dist_class: %s, deg_class: %s)' % (n, lmk_class, lmk_color, rel_class, dist_class, deg_class) )
ckeys, ccounts = zip(*[(cword.word,cword.count) for cword in cp_db.all()])
ccounter = {}
for cword in cp_db.all():
if cword.word in ccounter: ccounter[cword.word] += cword.count
else: ccounter[cword.word] = cword.count
ckeys, ccounts = zip(*ccounter.items())
# print 'ckeys', ckeys
# print 'ccounts', ccounts
ccounts = np.array(ccounts, dtype=float)
ccounts /= ccounts.sum()
w, w_prob, w_entropy = categorical_sample(ckeys, ccounts)
words.append(w)
probs.append(w_prob)
entropy.append(w_entropy)
p, H = np.prod(probs), np.sum(entropy)
# print 'expanding %s to %s (p: %f, H: %f)' % (terminals, words, p, H)
return words, p, H
Author: colinwinslow | Project: bolt | Lines: 58 | Source: sentence_from_location.py
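`categorical_sample` is a helper from the same project that is not shown here; judging from how `get_words` unpacks its result, it draws one word from the normalized count distribution and also reports that word's probability and the distribution's entropy. A rough stand-in under those assumptions:

import numpy as np

def categorical_sample(keys, probs):
    """Hypothetical sketch of the project's categorical_sample helper.

    Samples one key according to `probs` and returns (key, probability of that
    key, Shannon entropy of the whole distribution), matching how get_words()
    uses the three return values.
    """
    probs = np.asarray(probs, dtype=float)
    probs = probs / probs.sum()
    idx = np.random.choice(len(keys), p=probs)
    entropy = -np.sum(probs * np.log2(probs + 1e-12))
    return keys[idx], probs[idx], entropy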
Example 7: get_sentence_meaning_likelihood
def get_sentence_meaning_likelihood(sentence, lmk, rel):
modparse = get_modparse(sentence)
t = ParentedTree.parse(modparse)
print '\n%s\n' % t.pprint()
probs, entropies, lrpc, tps = get_tree_probs(t, lmk, rel)
if np.prod(probs) == 0.0:
logger('ERROR: Probability product is 0 for sentence: %s, lmk: %s, rel: %s, probs: %s' % (sentence, lmk, rel, str(probs)))
return np.prod(probs), sum(entropies), lrpc, tps
Author: marcovzla | Project: bolt | Lines: 9 | Source: location_from_sentence.py
Example 8: __init__
def __init__(self, sra, validators):
super(SampleValidator, self).__init__(validators)
self.normalize = lambda t: t.lower().replace(' ', '_')
self.sra = sra
self.xmljson = self.sra.obj_xmljson()
for (xml, attrs) in self.xmljson:
logger('\n#__normalizingTags:{0}\n'.format(attrs['title']))
attrs['attributes'] = self.normalize_tags(attrs['attributes'])
logger("\n\n")
Author: IHEC | Project: ihec-ecosystems | Lines: 9 | Source: validate_sample.py
Example 9: validate_semantics
def validate_semantics(self, attrs):
attributes = attrs['attributes']
if 'donor_age_unit' in attributes and attributes['donor_age_unit'] == 'year' and isinstance(attributes['donor_age'], int):
age = int(attributes['donor_age'])
if age > 90:
logger('#__error: Donors over 90 years of age should be entered as "90+"\n')
return False
return True
Author: IHEC | Project: ihec-ecosystems | Lines: 9 | Source: validate_sample.py
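For illustration, a record that trips the age rule would look roughly like the dict below; the attribute names mirror the ones used in the code, the values are invented, and `validator` stands for a constructed `SampleValidator` instance.

attrs = {'attributes': {'donor_age': 95, 'donor_age_unit': 'year'}}
ok = validator.validate_semantics(attrs)
# ok is False and an error is logged: donors over 90 must be entered as "90+".
# Records whose donor_age is already the string "90+" (or any non-integer)
# skip the integer check and pass this particular rule.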
Example 10: run_once
def run_once(self, make_thread=True, last_update_id=None, update_timeout=30):
""" Check the the messages for commands and make a Thread or FakeThread with the command depending on make_thread.
Args:
make_thread:
True: the function returns a list with threads. Which didn't start yet.
False: the function returns a list with FakeThreads. Which did'nt start yet.
last_update_id:
the offset arg from getUpdates and is kept up to date within this function
update_timeout:
timeout for updates. can be None for no timeout.
Returns:
A tuple of two elements. The first element is a list with Threads or FakeThreads which didn't start yet.
The second element is the updated las_update_id
"""
if make_thread:
ch_Thread = threading.Thread
else:
ch_Thread = FakeThread
bot_name = self.bot.username
threads = {}
self._getupdates_can_write.append(True)
get_updates_index = len(self._getupdates_can_write) - 1
get_updates_thread = threading.Thread(target=self.get_updates,
kwargs={'index': get_updates_index,
'offset': last_update_id})
get_updates_thread.start()
get_updates_thread.join(timeout=update_timeout)
if get_updates_thread.isAlive():
logger('ERROR getupdates timed out, using empty list')
self._getupdates_can_write[get_updates_index] = False
self._last_updates = []
updates = self._last_updates
for update in updates:
last_update_id = update.update_id + 1
message = update.message
if len(message.text) == 0:
message.text = ' '
if message.text[0] == '/':
command, username = message.text.split(' ')[0], bot_name
if '@' in command:
command, username = command.split('@')
if username == bot_name:
command_func = self._get_command_func(command)
if command_func is not None:
self.bot.sendChatAction(chat_id=update.message.chat.id, action=telegram.ChatAction.TYPING)
if self.isValidCommand is None or self.isValidCommand(update):
t = ch_Thread(target=command_func, args=(update,))
threads[(message.text, update.message.chat.id)] = t
else:
t = ch_Thread(target=self._command_not_valid, args=(update,))
threads[(message.text + ' unauthorized', update.message.chat.id)] = t
else:
t = ch_Thread(target=self._command_not_found, args=(update,))
threads[(message.text + ' not found', update.message.chat.id)] = t
return threads, last_update_id
Author: njittam | Project: bot | Lines: 57 | Source: command_handler.py
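A hedged sketch of the polling loop that would typically drive `run_once`; the `handler` object and the one-second pause are illustrative, not part of the project's code.

import time

last_update_id = None
while True:
    # run_once() returns unstarted (Fake)Threads plus the next getUpdates offset
    threads, last_update_id = handler.run_once(make_thread=True,
                                               last_update_id=last_update_id,
                                               update_timeout=30)
    for key, thread in threads.items():
        thread.start()   # each thread executes one matched command function
    time.sleep(1)        # small pause between polls of the Telegram API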
Example 11: from_sra_main_to_attributes
def from_sra_main_to_attributes(self, hashed):
if 'library_strategy' in hashed:
if 'LIBRARY_STRATEGY' in hashed['attributes'] or 'library_strategy' in hashed['attributes']:
lib_strat_attr = 'LIBRARY_STRATEGY' if 'LIBRARY_STRATEGY' in hashed['attributes'] else 'library_strategy'
hashed['attributes']['LIBRARY_STRATEGY_IHEC'] = hashed['attributes'][lib_strat_attr]
old_lib_start = hashed['attributes'].pop(lib_strat_attr)
logger("#warn:__library_strategy__ defined in both SRA block and as IHEC attribute:{0}, value pushed into 'LIBRARY_STRATEGY_IHEC'\n".format(old_lib_start))
hashed['attributes']['LIBRARY_STRATEGY'] = [hashed['library_strategy']]
#hashed['attributes']['@idblock'] = hashed['@idblock']
return hashed
Author: IHEC | Project: ihec-ecosystems | Lines: 10 | Source: sraparse.py
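The method works on a parsed SRA record in dict form; the shape below is inferred from the code (a top-level `library_strategy` plus an `attributes` sub-dict), the values are invented, and `parser` stands for whatever object this method is bound to.

hashed = {
    'library_strategy': 'ChIP-Seq',
    'attributes': {'LIBRARY_STRATEGY': 'ChIP-Seq', 'donor_id': 'D123'},
}
hashed = parser.from_sra_main_to_attributes(hashed)
# The IHEC-level attribute is moved to 'LIBRARY_STRATEGY_IHEC', a warning is
# logged about the duplicate definition, and the SRA-level value ends up as
# hashed['attributes']['LIBRARY_STRATEGY'] == ['ChIP-Seq'].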
Example 12: extract_additional_experiment_attributes
def extract_additional_experiment_attributes(self, obj, hashed):
strategy = hashed.get("library_strategy", "" ).strip()
if not strategy:
strategy = self.extract_optional(obj, ".//SEQUENCING_LIBRARY_STRATEGY")
if not strategy or len(strategy) > 1:
logger("#warn__: cannot parse 'library_strategy' or 'library_sequencing_strategy'.. {0}\n ".format(str(strategy)))
else:
logger("#warn__: updated 'library_strategy' with 'library_sequencing_strategy'.. {0}\n ".format(str(strategy[0].text)))
hashed["library_strategy"] = strategy[0].text.strip()
return hashed
Author: IHEC | Project: ihec-ecosystems | Lines: 10 | Source: sraparse.py
Example 13: get_updates
def get_updates(self, *args, index, offset, **kwargs):
try:
temp = self.bot.getUpdates(*args, offset=offset, **kwargs)
except Exception as e:
temp = []
            logger('because an error occurred updates will be empty id:', index, type(e), e.args, e)
if self._getupdates_can_write[index]:
self._last_updates = temp
else:
logger('error get_updates done. but not able to send output.', index)
return temp
Author: njittam | Project: bot | Lines: 11 | Source: command_handler.py
Example 14: _generate_help_list
def _generate_help_list(self):
logger('methods', [attr[0] for attr in getmembers(self, predicate=ismethod)])
command_functions = [attr[1] for attr in getmembers(self, predicate=ismethod) if attr[0][:8] == 'command_' and
attr[0] not in self.skip_in_help]
help_message = ''
for command_function in command_functions:
if command_function.__doc__ is not None:
help_message += ' /' + command_function.__name__[8:] + ' - ' + command_function.__doc__ + '\n'
else:
help_message += ' /' + command_function.__name__[8:] + ' - ' + '\n'
return help_message
Author: njittam | Project: bot | Lines: 11 | Source: command_handler.py
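The help text is driven purely by the `command_` naming convention and the methods' docstrings. The self-contained sketch below (class and command names invented) shows the same extraction logic stripped of the logging call:

from inspect import getmembers, ismethod

class DemoHandler(object):
    """Illustrative handler: only the command_* naming convention matters here."""
    skip_in_help = []

    def command_start(self, update):
        """Start talking to the bot."""

    def command_echo(self, update):
        """Repeat the text you send."""

    def help_text(self):
        # same idea as _generate_help_list(), minus the logger() call
        lines = []
        for name, func in getmembers(self, predicate=ismethod):
            if name.startswith('command_') and name not in self.skip_in_help:
                lines.append(' /' + name[len('command_'):] + ' - ' + (func.__doc__ or ''))
        return '\n'.join(lines)

print(DemoHandler().help_text())
#  /echo - Repeat the text you send.
#  /start - Start talking to the bot.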
Example 15: probs_metric
def probs_metric(inverse=False):
rand_p = Vec2(random()*table.width+table.min_point.x, random()*table.height+table.min_point.y)
try:
bestmeaning, bestsentence = generate_sentence(rand_p, False, scene, speaker, usebest=True, golden=inverse, printing=printing)
sampled_landmark, sampled_relation = bestmeaning.args[0], bestmeaning.args[3]
golden_posteriors = get_all_sentence_posteriors(bestsentence, meanings, golden=(not inverse), printing=printing)
# lmk_prior = speaker.get_landmark_probability(sampled_landmark, landmarks, PointRepresentation(rand_p))[0]
all_lmk_probs = speaker.all_landmark_probs(landmarks, Landmark(None, PointRepresentation(rand_p), None))
all_lmk_probs = dict(zip(landmarks, all_lmk_probs))
lmk_prior = all_lmk_probs[sampled_landmark]
head_on = speaker.get_head_on_viewpoint(sampled_landmark)
rel_prior = speaker.get_probabilities_points( np.array([rand_p]), sampled_relation, head_on, sampled_landmark)
lmk_post = golden_posteriors[sampled_landmark]
rel_post = golden_posteriors[sampled_relation]
ps = np.array([golden_posteriors[lmk]*golden_posteriors[rel] for lmk, rel in meanings])
rank = None
for i,p in enumerate(ps):
lmk,rel = meanings[i]
# logger( '%f, %s' % (p, m2s(lmk,rel)))
head_on = speaker.get_head_on_viewpoint(lmk)
# ps[i] *= speaker.get_landmark_probability(lmk, landmarks, PointRepresentation(rand_p))[0]
ps[i] *= all_lmk_probs[lmk]
ps[i] *= speaker.get_probabilities_points( np.array([rand_p]), rel, head_on, lmk)
if lmk == sampled_landmark and rel == sampled_relation:
idx = i
ps += epsilon
ps = ps/ps.sum()
prob = ps[idx]
rank = sorted(ps, reverse=True).index(prob)
entropy = entropy_of_probs(ps)
except (ParseError,RuntimeError) as e:
logger( e )
lmk_prior = 0
rel_prior = 0
lmk_post = 0
rel_post = 0
prob = 0
rank = len(meanings)-1
entropy = 0
distances = [[None]]
head_on = speaker.get_head_on_viewpoint(sampled_landmark)
all_descs = speaker.get_all_meaning_descriptions(trajector, scene, sampled_landmark, sampled_relation, head_on, 1)
distances = []
for desc in all_descs:
distances.append([edit_distance( bestsentence, desc ), desc])
distances.sort()
return lmk_prior,rel_prior,lmk_post,rel_post,\
prob,entropy,rank,distances[0][0],type(sampled_relation)
Author: arebgun | Project: bolt | Lines: 53 | Source: object_correction_testing.py
Example 16: accept_correction
def accept_correction( meaning, correction, update_func='geometric', update_scale=10 ):
(lmk, lmk_prob, lmk_ent,
rel, rel_prob, rel_ent,
rel_exp_chain, rele_prob_chain, rele_ent_chain, rel_terminals, rel_landmarks,
lmk_exp_chain, lmke_prob_chain, lmke_ent_chain, lmk_terminals, lmk_landmarks,
rel_words, relw_prob, relw_ent,
lmk_words, lmkw_prob, lmkw_ent) = meaning.args
old_meaning_prob, old_meaning_entropy, lrpc, tps = get_sentence_meaning_likelihood( correction, lmk, rel )
update = update_funcs[update_func](lmk_prob * rel_prob, old_meaning_prob, lmk_ent + rel_ent, old_meaning_entropy) * update_scale
logger('Update functions is %s and update value is: %f' % (update_func, update))
# print 'lmk_prob, lmk_ent, rel_prob, rel_ent, old_meaning_prob, old_meaning_entropy, update', lmk_prob, lmk_ent, rel_prob, rel_ent, old_meaning_prob, old_meaning_entropy, update
# print lmk.object_class, type(rel)
dec_update = -update
for lhs,rhs,parent,_ in rel_exp_chain:
# print 'Decrementing production - lhs: %s, rhs: %s, parent: %s' % (lhs,rhs,parent)
update_expansion_counts( dec_update, lhs, rhs, parent, rel=rel )
for lhs,rhs,parent,lmk in lmk_exp_chain:
# print 'Decrementing production - lhs: %s, rhs: %s, parent: %s' % (lhs,rhs,parent)
update_expansion_counts( dec_update, lhs, rhs, parent, lmk_class=(lmk.object_class if lmk else None),
lmk_ori_rels=get_lmk_ori_rels_str(lmk),
lmk_color=(lmk.color if lmk else None) )
for term,word in zip(rel_terminals,rel_words):
# print 'Decrementing word - pos: %s, word: %s, rel: %s' % (term, word, rel)
update_word_counts( dec_update, term, word, rel=rel )
for term,word,lmk in zip(lmk_terminals,lmk_words,lmk_landmarks):
# print 'Decrementing word - pos: %s, word: %s, lmk_class: %s' % (term, word, lmk.object_class)
update_word_counts( dec_update, term, word, lmk_class=lmk.object_class,
lmk_ori_rels=get_lmk_ori_rels_str(lmk),
lmk_color=(lmk.color if lmk else None) )
# reward new words with old meaning
for lhs,rhs,parent,lmk,rel in lrpc:
# print 'Incrementing production - lhs: %s, rhs: %s, parent: %s' % (lhs,rhs,parent)
update_expansion_counts( update, lhs, rhs, parent, rel=rel,
lmk_class=(lmk.object_class if lmk else None),
lmk_ori_rels=get_lmk_ori_rels_str(lmk),
lmk_color=(lmk.color if lmk else None) )
for lhs,rhs,lmk,rel in tps:
# print 'Incrementing word - pos: %s, word: %s, lmk_class: %s' % (lhs, rhs, (lmk.object_class if lmk else None) )
update_word_counts( update, lhs, rhs, lmk_class=(lmk.object_class if lmk else None),
rel=rel,
lmk_ori_rels=get_lmk_ori_rels_str(lmk),
lmk_color=(lmk.color if lmk else None) )
Author: colinwinslow | Project: bolt | Lines: 52 | Source: sentence_from_location.py
Example 17: make_diffstring
def make_diffstring(content_ab, separator):
raw_text_input_a, raw_text_input_b = content_ab
text_input_a = raw_text_input_a.split(separator)
text_input_b = raw_text_input_b.split(separator)
# http://docs.python.org/library/difflib.html
diff_object = difflib.HtmlDiff(wrapcolumn=87)
diff_string = diff_object.make_table( text_input_a, text_input_b)
if not type(diff_string) == unicode:
logger('make_table failed')
return
return ''.join(diff_string)
Author: KWMalik | Project: GAE-webdiff | Lines: 14 | Source: webdiff.py
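A hedged usage sketch, assuming the function is fed a pair of newline-separated documents; the sample strings are invented.

before = "alpha\nbeta\ngamma"
after = "alpha\nbeta\ndelta"
html_table = make_diffstring((before, after), '\n')
# html_table is the HTML <table> produced by difflib.HtmlDiff, ready to embed
# in a page; the changed 'gamma' -> 'delta' line is shown side by side.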
Example 18: E
def E():
data = getDataFromTxt(TXT)
error = np.zeros((len(data), 3))
for i in range(len(data)):
imgPath, bbox, landmarkGt = data[i]
landmarkGt = landmarkGt[:3, :]
img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
assert(img is not None)
logger("process %s" % imgPath)
landmarkP = EN(img, bbox)
# real landmark
landmarkP = bbox.reprojectLandmark(landmarkP)
landmarkGt = bbox.reprojectLandmark(landmarkGt)
error[i] = evaluateError(landmarkGt, landmarkP, bbox)
return error
Author: NUAAXXY | Project: CNN-FaceKeyPoint-Detection | Lines: 17 | Source: EN.py
Example 19: heatmaps_for_sentences
def heatmaps_for_sentences(sentences, all_meanings, loi_infos, xs, ys, scene, speaker, step=0.02):
printing=False
x = np.array( [list(xs-step*0.5)]*len(ys) )
y = np.array( [list(ys-step*0.5)]*len(xs) ).T
scene_bb = scene.get_bounding_box()
scene_bb = scene_bb.inflate( Vec2(scene_bb.width*0.5,scene_bb.height*0.5) )
combined_heatmaps = []
for obj_lmk, ms, heatmapss in loi_infos:
combined_heatmap = None
for sentence in sentences:
posteriors = None
while not posteriors:
try:
posteriors = get_all_sentence_posteriors(sentence, all_meanings, printing=printing)
except ParseError as pe:
raise pe
except Exception as e:
print e
sleeptime = random()*0.5
logger('Sleeping for %f and retrying "%s"' % (sleeptime,sentence))
time.sleep(sleeptime)
continue
big_heatmap1 = None
for m,(h1,h2) in zip(ms, heatmapss):
lmk,rel = m
p = posteriors[rel]*posteriors[lmk]
if big_heatmap1 is None:
big_heatmap1 = p*h1
else:
big_heatmap1 += p*h1
if combined_heatmap is None:
combined_heatmap = big_heatmap1
else:
combined_heatmap *= big_heatmap1
combined_heatmaps.append(combined_heatmap)
return combined_heatmaps
Author: arebgun | Project: bolt | Lines: 43 | Source: testing_testing.py
Example 20: main
def main(args):
print args['-config']
outfile = args['-out']
config = json2.loadf(args['-config'])
xml_validator = XMLValidator(config["sra"]["sample"])
ihec_validators = cmn.safedict([(schema["version"] , JsonSchema(schema["schema"], args)) for schema in config["ihec"]["sample"]])
objtype = 'SAMPLE'
objset = 'SAMPLE_SET'
validated = list()
xmllist = args.args()
nObjs = 0
for e in xmllist:
sra = SRAParseObjSet.from_file(e)
nObjs += sra.nOffspring()
assert sra.xml.getroot().tag == objset, ['__Expected:' + objset]
assert sra.is_valid__xml(xml_validator) or args.has('-not-sra-xml-but-try')
v = SampleValidator(sra, ihec_validators)
validated.extend(v.is_valid_ihec())
versioned_xml = ['<{0}>'.format(objset) ]
for e in validated:
(version, xml) = e
sra_versioned = SRAParseObj(xml)
sra_versioned.add_attribute("VALIDATED_AGAINST_METADATA_SPEC", "{0}/{1}".format(version, objtype))
versioned_xml.append(sra_versioned.tostring())
versioned_xml.append('</{0}>'.format(objset))
validated_xml_file = cmn.writel(outfile, versioned_xml)
print 'written:' + validated_xml_file
print 'validated:', len(validated)
print 'failed:', nObjs - len(validated)
if validated:
validated_xml_set = SRAParseObjSet.from_file(validated_xml_file)
assert validated_xml_set.is_valid__xml(xml_validator) or args.has("-skip-updated-xml-validation")
logger('ok\n')
else:
logger('..no valid objects found\n')
Author: IHEC | Project: ihec-ecosystems | Lines: 41 | Source: validate_sample.py
Note: The utils.logger examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. For redistribution and use, please refer to the License of the corresponding project; do not republish without permission.