This article collects typical usage examples of the utils.log.debug function in Python. If you are wondering how the Python debug function is used in practice, how to call it, or what real-world examples look like, the curated code samples below should help.
A total of 20 code examples of the debug function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
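Before the examples, here is a minimal sketch of the kind of module that exposes such a debug function. This is not code from any of the projects quoted below — it is an assumption added for illustration only, and real utils.log modules (for instance the color-coded loggers used in the NewsBlur snippets) are wired up differently — but it shows the common pattern the examples rely on: a module-level logger whose debug/info/error functions the rest of the code imports with "from utils import log".

# utils/log.py -- hypothetical minimal implementation, for context only;
# each project quoted below defines its own version of this module.
import logging

_logger = logging.getLogger('app')
_logger.setLevel(logging.DEBUG)

_handler = logging.StreamHandler()
_handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
_logger.addHandler(_handler)

# Module-level functions that callers use as log.debug(...), log.error(...), etc.
debug = _logger.debug
info = _logger.info
error = _logger.error
critical = _logger.critical

With a module like this on the import path, calling code only needs "from utils import log" and can then write log.debug('message'), which is the call pattern most of the examples below exercise.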
Example 1: collect_files
def collect_files(self, task_id=None):
    t1 = time.clock()
    self.files(self.path)
    self.result['no_extension'] = {'file_count': 0, 'file_list': []}
    for extension, values in self.type_nums.iteritems():
        extension = extension.strip()
        self.result[extension] = {'file_count': len(values), 'file_list': []}
        # .php : 123
        log.debug('{0} : {1}'.format(extension, len(values)))
        if task_id is not None:
            # Store
            ext = CobraExt(task_id, extension, len(values))
            db.session.add(ext)
        for f in self.file:
            es = f.split(os.extsep)
            if len(es) >= 2:
                # Exists Extension
                # os.extsep + es[len(es) - 1]
                if f.endswith(extension):
                    self.result[extension]['file_list'].append(f)
            else:
                # Didn't have extension
                self.result['no_extension']['file_count'] = int(self.result['no_extension']['file_count']) + 1
                self.result['no_extension']['file_list'].append(f)
    if task_id is not None:
        db.session.commit()
    t2 = time.clock()
    self.result['file_nums'] = self.file_id
    self.result['collect_time'] = t2 - t1
    return self.result
Developer: 0x24bin | Project: cobra | Lines: 30 | Source: directory.py
Example 2: fetch_image_from_page_data
def fetch_image_from_page_data(self):
    image = None
    image_file = None
    if self.page_data:
        content = self.page_data
    elif settings.BACKED_BY_AWS.get('pages_on_s3') and self.feed.s3_page:
        key = settings.S3_PAGES_BUCKET.get_key(self.feed.s3_pages_key)
        compressed_content = key.get_contents_as_string()
        stream = StringIO(compressed_content)
        gz = gzip.GzipFile(fileobj=stream)
        try:
            content = gz.read()
        except IOError:
            content = None
    else:
        content = MFeedPage.get_data(feed_id=self.feed.pk)
    url = self._url_from_html(content)
    if not url:
        try:
            content = requests.get(self.feed.feed_link).content
            url = self._url_from_html(content)
        except (AttributeError, SocketError, requests.ConnectionError,
                requests.models.MissingSchema, requests.sessions.InvalidSchema,
                requests.sessions.TooManyRedirects,
                requests.models.InvalidURL,
                requests.models.ChunkedEncodingError,
                requests.models.ContentDecodingError,
                LocationParseError, OpenSSLError, PyAsn1Error), e:
            logging.debug(" ---> ~SN~FRFailed~FY to fetch ~FGfeed icon~FY: %s" % e)
Developer: stevenatwork | Project: NewsBlur | Lines: 29 | Source: icon_importer.py
Example 3: count_unreads_for_subscribers
def count_unreads_for_subscribers(self, feed):
    UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
    user_subs = UserSubscription.objects.filter(feed=feed,
                                                active=True,
                                                user__profile__last_seen_on__gte=UNREAD_CUTOFF)\
                                        .order_by('-last_read_date')
    if not user_subs.count():
        return
    for sub in user_subs:
        if not sub.needs_unread_recalc:
            sub.needs_unread_recalc = True
            sub.save()
    if self.options['compute_scores']:
        stories = MStory.objects(story_feed_id=feed.pk,
                                 story_date__gte=UNREAD_CUTOFF)\
                        .read_preference(pymongo.ReadPreference.PRIMARY)
        stories = Feed.format_stories(stories, feed.pk)
        logging.debug(u' ---> [%-30s] ~FYComputing scores: ~SB%s stories~SN with ~SB%s subscribers ~SN(%s/%s/%s)' % (
            feed.title[:30], len(stories), user_subs.count(),
            feed.num_subscribers, feed.active_subscribers, feed.premium_subscribers))
        self.calculate_feed_scores_with_stories(user_subs, stories)
    elif self.options.get('mongodb_replication_lag'):
        logging.debug(u' ---> [%-30s] ~BR~FYSkipping computing scores: ~SB%s seconds~SN of mongodb lag' % (
            feed.title[:30], self.options.get('mongodb_replication_lag')))
Developer: Maxxx557 | Project: NewsBlur | Lines: 27 | Source: feed_fetcher.py
Example 4: fetch
def fetch(self):
    """
    Uses feedparser to download the feed. Will be parsed later.
    """
    identity = self.get_identity()
    log_msg = u'%2s ---> [%-30s] Fetching feed (%d)' % (identity,
                                                        unicode(self.feed)[:30],
                                                        self.feed.id)
    logging.debug(log_msg)
    self.feed.set_next_scheduled_update()
    etag=self.feed.etag
    modified = self.feed.last_modified.utctimetuple()[:7] if self.feed.last_modified else None
    if self.options.get('force') or not self.feed.fetched_once:
        modified = None
        etag = None
    USER_AGENT = 'NewsBlur Feed Fetcher (%s subscriber%s) - %s' % (
        self.feed.num_subscribers,
        's' if self.feed.num_subscribers != 1 else '',
        URL
    )
    self.fpf = feedparser.parse(self.feed.feed_address,
                                agent=USER_AGENT,
                                etag=etag,
                                modified=modified)
    return FEED_OK, self.fpf
Developer: rkabir | Project: NewsBlur | Lines: 29 | Source: feed_fetcher.py
Example 5: save_page
def save_page(self, html):
    if html and len(html) > 100:
        if settings.BACKED_BY_AWS.get('pages_on_s3'):
            k = Key(settings.S3_PAGES_BUCKET)
            k.key = self.feed.s3_pages_key
            k.set_metadata('Content-Encoding', 'gzip')
            k.set_metadata('Content-Type', 'text/html')
            k.set_metadata('Access-Control-Allow-Origin', '*')
            out = StringIO.StringIO()
            f = gzip.GzipFile(fileobj=out, mode='w')
            f.write(html)
            f.close()
            compressed_html = out.getvalue()
            k.set_contents_from_string(compressed_html)
            k.set_acl('public-read')
            try:
                feed_page = MFeedPage.objects.get(feed_id=self.feed.pk)
                feed_page.delete()
                logging.debug(' --->> [%-30s] ~FYTransfering page data to S3...' % (self.feed))
            except MFeedPage.DoesNotExist:
                pass
            self.feed.s3_page = True
            self.feed.save()
        else:
            try:
                feed_page = MFeedPage.objects.get(feed_id=self.feed.pk)
                feed_page.page_data = html
                feed_page.save()
            except MFeedPage.DoesNotExist:
                feed_page = MFeedPage.objects.create(feed_id=self.feed.pk, page_data=html)
            return feed_page
Developer: imageoptimiser | Project: NewsBlur | Lines: 33 | Source: page_importer.py
Example 6: create_zip
def create_zip(archive, files):
    '''Creates a zip file containing the files being backed up.'''
    import zipfile
    from utils.misc import add_file_hash
    try:
        # zipfile always follows links
        with zipfile.ZipFile(archive, 'w') as zipf:
            zipf.comment = 'Created by s3-backup'
            for f in files:
                f = f.strip()
                if os.path.exists(f):
                    zipf.write(f)
                    add_file_hash(archive, f)
                    log.debug('Added %s.' % f)
                else:
                    log.error('%s does not exist.' % f)
            if zipf.testzip() != None:
                log.error('An error occured creating the zip archive.')
    except zipfile.BadZipfile:
        # I assume this only happens on reads? Just in case...
        log.critical('The zip file is corrupt.')
    except zipfile.LargeZipFile:
        log.critical('The zip file is greater than 2 GB.'
                     ' Enable zip64 functionality.')
Developer: rjframe | Project: s3-backup | Lines: 26 | Source: s3backup.py
Example 7: fetch
def fetch(self):
    """ Downloads and parses a feed.
    """
    socket.setdefaulttimeout(30)
    identity = self.get_identity()
    log_msg = u'%2s ---> [%-30s] Fetching feed (%d)' % (identity,
                                                        unicode(self.feed)[:30],
                                                        self.feed.id)
    logging.debug(log_msg)
    # Check if feed still needs to be updated
    # feed = Feed.objects.get(pk=self.feed.pk)
    # if feed.next_scheduled_update > datetime.datetime.now() and not self.options.get('force'):
    #     log_msg = u' ---> Already fetched %s (%d)' % (self.feed.feed_title,
    #                                                   self.feed.id)
    #     logging.debug(log_msg)
    #     feed.save_feed_history(303, "Already fetched")
    #     return FEED_SAME, None
    # else:
    self.feed.set_next_scheduled_update()
    etag=self.feed.etag
    modified = self.feed.last_modified.utctimetuple()[:7] if self.feed.last_modified else None
    if self.options.get('force'):
        modified = None
        etag = None
    self.fpf = feedparser.parse(self.feed.feed_address,
                                agent=USER_AGENT,
                                etag=etag,
                                modified=modified)
    return FEED_OK, self.fpf
Developer: dkeskar | Project: NewsBlur | Lines: 34 | Source: feed_fetcher.py
Example 8: _test_error
def _test_error(self):
    outgoing_error_msg = OutgoingErrorMsg(tc.TID, GENERIC_E)
    data = outgoing_error_msg.encode()
    tid, msg_type, msg_dict = decode(data)
    incoming_error_msg = IncomingErrorMsg(msg_dict)
    log.debug(incoming_error_msg.error)
    assert incoming_error_msg.error == GENERIC_E
Developer: smoothit | Project: smoothit-client | Lines: 7 | Source: test_message.py
Example 9: query
def query(cls, text):
    try:
        cls.ES.default_indices = cls.index_name()
        cls.ES.indices.refresh()
    except pyes.exceptions.NoServerAvailable:
        logging.debug(" ***> ~FRNo search server available.")
        return []
    logging.info("~FGSearch ~FCfeeds~FG by address: ~SB%s" % text)
    q = MatchQuery('address', text, operator="and", type="phrase")
    results = cls.ES.search(query=q, sort="num_subscribers:desc", size=5,
                            doc_types=[cls.type_name()])
    if not results.total:
        logging.info("~FGSearch ~FCfeeds~FG by title: ~SB%s" % text)
        q = MatchQuery('title', text, operator="and")
        results = cls.ES.search(query=q, sort="num_subscribers:desc", size=5,
                                doc_types=[cls.type_name()])
    if not results.total:
        logging.info("~FGSearch ~FCfeeds~FG by link: ~SB%s" % text)
        q = MatchQuery('link', text, operator="and")
        results = cls.ES.search(query=q, sort="num_subscribers:desc", size=5,
                                doc_types=[cls.type_name()])
    return results
Developer: 76 | Project: NewsBlur | Lines: 26 | Source: models.py
Example 10: collect_feedback
def collect_feedback(cls):
    seen_posts = set()
    try:
        data = urllib2.urlopen('https://forum.newsblur.com/posts.json').read()
    except (urllib2.HTTPError), e:
        logging.debug(" ***> Failed to collect feedback: %s" % e)
        return
Developer: MilenkoM | Project: NewsBlur | Lines: 7 | Source: models.py
Example 11: test_dont_query_myself
def test_dont_query_myself(self):
    log.debug('test start')
    self.lookup.start()
    # Ongoing queries to (sorted: oldest first):
    # 155-4, 157-3,
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 158-1, 159-0
    # Notice 159-2 is kicked out from the queue
    eq_(self.lookup.num_parallel_queries, 2)
    nodes = [Node(tc.CLIENT_ADDR, self.lookup._my_id)]
    self.lookup._on_response(*_gen_nodes_args(
        tc.NODES_LD_IH[157][3],
        nodes))
    eq_(self.lookup._get_announce_candidates(),
        [tc.NODES_LD_IH[157][3],
         ])
    # This response triggers a new query to 158-1 (ignoring myself)
    eq_(self.lookup.num_parallel_queries, 2)
    # Ongoing queries to (sorted: oldest first):
    # 155-4, 158-1
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 159-0
    self.lookup._on_timeout(tc.NODES_LD_IH[155][4])
    # This timeout triggers a new query (to 159-0)
    eq_(self.lookup.num_parallel_queries, 2)
    self.lookup._on_timeout(tc.NODES_LD_IH[158][1])
    # No more nodes to send queries to
    eq_(self.lookup.num_parallel_queries, 1)
    ok_(not self.lookup.is_done)
    self.lookup._on_timeout(tc.NODES_LD_IH[159][0])
    # No more nodes to send queries to
    eq_(self.lookup.num_parallel_queries, 0)
    ok_(self.lookup.is_done)
Developer: gvsurenderreddy | Project: smoothit-client | Lines: 33 | Source: test_lookup_manager.py
Example 12: test_different_delay
def test_different_delay(self):
    # NOTICE: this test might fail if your configuration
    # (interpreter/processor) is too slow
    task_delays = (1, 1, 1, .5, 1, 1, 2, 1, 1, 1,
                   1, 1.5, 1, 1, 1, 1, .3)
    expected_list = ([],
                     ['a', 16, 3, 'b'], #9 is cancelled
                     ['a', 0, 1, 2, 4, 5, 7, 8, 10, 12, 13, 15, 'c', 'b'],
                     ['a', 11, 'c', 'b'],
                     ['a', 6, 'c', 'b'],
                     )
    tasks = [Task(delay, self.callback_f, i) \
             for i, delay in enumerate(task_delays)]
    for task in tasks:
        self.task_m.add(task)
    for i, expected in enumerate(expected_list):
        while True:
            task = self.task_m.consume_task()
            if task is None:
                break
            task.fire_callbacks()
        log.debug('#: %d, result: %s, expected: %s' % (i,
                  self.callback_order, expected))
        assert self.callback_order == expected
        self.callback_order = []
        self.task_m.add(Task(0, self.callback_f, 'a'))
        self.task_m.add(Task(.5, self.callback_f, 'b'))
        self.task_m.add(Task(1, self.callback_f, 'c'))
        time.sleep(.5)
        tasks[9].cancel() # too late (already fired)
        tasks[14].cancel() # should be cancelled
Developer: gvsurenderreddy | Project: smoothit-client | Lines: 34 | Source: test_minitwisted.py
Example 13: test_cancel
def test_cancel(self):
    for i in xrange(5):
        self.task_m.add(Task(.1, self.callback_f, i))
    c_task = Task(.1, self.callback_f, 5)
    self.task_m.add(c_task)
    for i in xrange(6,10):
        self.task_m.add(Task(.1, self.callback_f, i))
    while True:
        task = self.task_m.consume_task()
        if task is None:
            break
        task.fire_callback()
    log.debug('%s' % self.callback_order)
    assert self.callback_order == []
    ok_(not c_task.cancelled)
    c_task.cancel()
    ok_(c_task.cancelled)
    time.sleep(.1)
    while True:
        task = self.task_m.consume_task()
        if task is None:
            break
        task.fire_callbacks()
    log.debug('%s' % self.callback_order)
    assert self.callback_order == [0,1,2,3,4, 6,7,8,9]
Developer: gvsurenderreddy | Project: smoothit-client | Lines: 26 | Source: test_minitwisted.py
Example 14: query
def query(cls, feed_ids, query, order, offset, limit, strip=False):
    cls.create_elasticsearch_mapping()
    cls.ES.indices.refresh()
    if strip:
        query = re.sub(r'([^\s\w_\-])+', ' ', query) # Strip non-alphanumeric
    sort = "date:desc" if order == "newest" else "date:asc"
    string_q = pyes.query.QueryStringQuery(query, default_operator="AND")
    feed_q = pyes.query.TermsQuery('feed_id', feed_ids[:1000])
    q = pyes.query.BoolQuery(must=[string_q, feed_q])
    try:
        results = cls.ES.search(q, indices=cls.index_name(), doc_types=[cls.type_name()],
                                partial_fields={}, sort=sort, start=offset, size=limit)
    except pyes.exceptions.NoServerAvailable:
        logging.debug(" ***> ~FRNo search server available.")
        return []
    logging.info(" ---> ~FG~SNSearch ~FCstories~FG for: ~SB%s~SN (across %s feed%s)" %
                 (query, len(feed_ids), 's' if len(feed_ids) != 1 else ''))
    try:
        result_ids = [r.get_id() for r in results]
    except pyes.InvalidQuery, e:
        logging.info(" ---> ~FRInvalid search query \"%s\": %s" % (query, e))
        return []
Developer: MilenkoM | Project: NewsBlur | Lines: 25 | Source: models.py
Example 15: main
def main():
    lang = 'zh'
    if len(sys.argv) == 2:
        lang = sys.argv[1]
    cd = sys.path[0]
    translation_path = os.path.join(cd, '../translation')
    # load lua
    pregame_file = os.path.join(translation_path, 'en_pregame.lua')
    client_file = os.path.join(translation_path, 'en_client.lua')
    ui_mgr = UiMgr()
    log.debug('loading lua file %s' % pregame_file)
    ui_mgr.load_lua_file(pregame_file)
    log.debug('loading lua file %s' % client_file)
    ui_mgr.load_lua_file(client_file)
    log.info('read %d lines.' % len(ui_mgr.ui_lines))
    # save merged lines
    translate_file = os.path.join(translation_path, '%s_translate.txt' % lang)
    if os.path.exists(translate_file):
        choose = input('%s_translate.txt file exists, merge? [y/N]' % lang)
        choose = choose.lower().strip()
        if choose != '' and choose[0] == 'y':
            log.info('merging to translate file.')
            ui_mgr.apply_translate_from_txt_file(translate_file)
        else:
            log.info('skipped.')
            return
    with open(translate_file, 'wt', encoding='utf-8') as fp:
        fp.writelines(ui_mgr.get_txt_lines(replace=True))
    log.info('save translate file succeed.')
Developer: esozh | Project: eso_zh_ui | Lines: 34 | Source: convert_lua_to_txt.py
Example 16: get
def get(self, *args, **kwargs):
    try:
        return super(UserSubscriptionManager, self).get(*args, **kwargs)
    except self.model.DoesNotExist:
        if isinstance(kwargs.get('feed'), int):
            feed_id = kwargs.get('feed')
        elif 'feed' in kwargs:
            feed_id = kwargs['feed'].pk
        elif 'feed__pk' in kwargs:
            feed_id = kwargs['feed__pk']
        elif 'feed_id' in kwargs:
            feed_id = kwargs['feed_id']
        dupe_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
        if dupe_feed:
            feed = dupe_feed[0].feed
            if 'feed' in kwargs:
                kwargs['feed'] = feed
            elif 'feed__pk' in kwargs:
                kwargs['feed__pk'] = feed.pk
            elif 'feed_id' in kwargs:
                kwargs['feed_id'] = feed.pk
            user = kwargs.get('user')
            if isinstance(user, int):
                user = User.objects.get(pk=user)
            logging.debug(" ---> [%s] ~BRFound dupe UserSubscription: ~SB%s (%s)" % (user and user.username, feed, feed_id))
            return super(UserSubscriptionManager, self).get(*args, **kwargs)
        else:
            exc_info = sys.exc_info()
            raise exc_info[0], None, exc_info[2]
Developer: 0077cc | Project: NewsBlur | Lines: 29 | Source: managers.py
Example 17: count_unreads_for_subscribers
def count_unreads_for_subscribers(self, feed):
    UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
    user_subs = UserSubscription.objects.filter(feed=feed,
                                                active=True,
                                                user__profile__last_seen_on__gte=UNREAD_CUTOFF)\
                                        .order_by('-last_read_date')
    logging.debug(u' ---> [%-30s] Computing scores: %s (%s/%s/%s) subscribers' % (
        unicode(feed)[:30], user_subs.count(),
        feed.num_subscribers, feed.active_subscribers, feed.premium_subscribers))
    if self.options['slave_db']:
        slave_db = self.options['slave_db']
        stories_db_orig = slave_db.stories.find({
            "story_feed_id": feed.pk,
            "story_date": {
                "$gte": UNREAD_CUTOFF,
            },
        })
        stories_db = []
        for story in stories_db_orig:
            stories_db.append(bunch(story))
    else:
        stories_db = MStory.objects(story_feed_id=feed.pk,
                                    story_date__gte=UNREAD_CUTOFF)
    for sub in user_subs:
        cache.delete('usersub:%s' % sub.user_id)
        sub.needs_unread_recalc = True
        sub.save()
    if self.options['compute_scores']:
        for sub in user_subs:
            silent = False if self.options['verbose'] >= 2 else True
            sub.calculate_feed_scores(silent=silent, stories_db=stories_db)
Developer: rjmolesa | Project: NewsBlur | Lines: 34 | Source: feed_fetcher.py
Example 18: do_backup
def do_backup(schedule, follow_links):
    '''Handles the backup.'''
    from shutil import rmtree
    import utils.filesystem
    if schedule == 'daily':
        backup_list = config.daily_backup_list
    elif schedule == 'weekly':
        backup_list = config.weekly_backup_list
    else:
        backup_list = config.monthly_backup_list
    try:
        files = utils.filesystem.read_file_list(backup_list)
        archive_path, tar_type = create_archive(files, follow_links)
        if config.enc_backup == True:
            # We don't add the enc extension to the key - the metadata
            # will tell us whether the archive is encrypted.
            enc_file = utils.encrypt.encrypt_file(config.enc_key,
                                                  archive_path, config.enc_piece_size)
            send_backup(enc_file, tar_type, schedule)
            # Delete the plaintext local version
            os.remove(archive_path)
        else: # Not encrypting
            send_backup(archive_path, tar_type, schedule)
        if config.delete_archive_when_finished == True:
            log.debug('Deleting archive.')
            rmtree(config.dest_location)
    except IOError:
        raise
        log.critical('Cannot open file: %s' % backup_list)
        sys.exit(1)
Developer: rjframe | Project: s3-backup | Lines: 34 | Source: s3backup.py
Example 19: process_response
def process_response(self, request, response):
    if not self.activated(request): return response
    if connection.queries:
        time_elapsed = sum([float(q['time']) for q in connection.queries])
        queries = connection.queries
        for query in queries:
            if query.get('mongo'):
                query['sql'] = "~FM%s: %s" % (query['mongo']['collection'], query['mongo']['query'])
            elif query.get('redis'):
                query['sql'] = "~FC%s" % (query['redis']['query'])
            else:
                query['sql'] = re.sub(r'SELECT (.*?) FROM', 'SELECT * FROM', query['sql'])
                query['sql'] = re.sub(r'SELECT', '~FYSELECT', query['sql'])
                query['sql'] = re.sub(r'INSERT', '~FGINSERT', query['sql'])
                query['sql'] = re.sub(r'UPDATE', '~FY~SBUPDATE', query['sql'])
                query['sql'] = re.sub(r'DELETE', '~FR~SBDELETE', query['sql'])
        t = Template("{% for sql in sqllog %}{% if not forloop.first %} {% endif %}[{{forloop.counter}}] ~FC{{sql.time}}s~FW: {{sql.sql|safe}}{% if not forloop.last %}\n{% endif %}{% endfor %}")
        if settings.DEBUG:
            logging.debug(t.render(Context({
                'sqllog': queries,
                'count': len(queries),
                'time': time_elapsed,
            })))
        times_elapsed = {
            'sql': sum([float(q['time'])
                        for q in queries if not q.get('mongo') and
                        not q.get('redis')]),
            'mongo': sum([float(q['time']) for q in queries if q.get('mongo')]),
            'redis': sum([float(q['time']) for q in queries if q.get('redis')]),
        }
        setattr(request, 'sql_times_elapsed', times_elapsed)
    return response
Developer: booox | Project: NewsBlur | Lines: 32 | Source: middleware.py
Example 20: check_urls_against_pushed_data
def check_urls_against_pushed_data(self, parsed):
    if hasattr(parsed.feed, 'links'): # single notification
        hub_url = self.hub
        self_url = self.topic
        for link in parsed.feed.links:
            href = link.get('href', '')
            if any(w in href for w in ['wp-admin', 'wp-cron']):
                continue
            if link['rel'] == 'hub':
                hub_url = link['href']
            elif link['rel'] == 'self':
                self_url = link['href']
        needs_update = False
        if hub_url and self.hub != hub_url:
            # hub URL has changed; let's update our subscription
            needs_update = True
        elif self_url != self.topic:
            # topic URL has changed
            needs_update = True
        if needs_update:
            logging.debug(u' ---> [%-30s] ~FR~BKUpdating PuSH hub/topic: %s / %s' % (
                unicode(self.feed)[:30], hub_url, self_url))
            expiration_time = self.lease_expires - datetime.now()
            seconds = expiration_time.days*86400 + expiration_time.seconds
            PushSubscription.objects.subscribe(
                self_url, feed=self.feed, hub=hub_url,
                lease_seconds=seconds)
Developer: jefftriplett | Project: NewsBlur | Lines: 30 | Source: models.py
Note: The utils.log.debug examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's License before redistributing or reusing the code, and do not reproduce without permission.