本文整理汇总了Python中synergy.db.dao.unit_of_work_dao.UnitOfWorkDao类的典型用法代码示例。如果您正苦于以下问题:Python UnitOfWorkDao类的具体用法?Python UnitOfWorkDao怎么用?Python UnitOfWorkDao使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了UnitOfWorkDao类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: create_and_insert_unit_of_work
def create_and_insert_unit_of_work(process_name, start_id, end_id, state=unit_of_work.STATE_REQUESTED,
                                   timeperiod='INVALID_TIMEPERIOD'):
    """ Create a unit_of_work fixture and persist it in the DB.
    :return: id of the inserted unit_of_work document """
    fixture_uow = create_unit_of_work(process_name, start_id, end_id, timeperiod, state)
    dao = UnitOfWorkDao(get_logger(process_name))
    return dao.insert(fixture_uow)
开发者ID:xiaohuangji,项目名称:scheduler,代码行数:9,代码来源:base_fixtures.py
示例2: __init__
def __init__(self, process_name):
    """ Set up the GC worker: a lock guarding shared state, the publishers
    pool and DAO accessors for unit_of_work and managed-process records. """
    super(GarbageCollectorWorker, self).__init__(process_name)
    self.lock = Lock()
    self.managed_entries = dict()
    self.publishers = PublishersPool(self.logger)
    self.uow_dao = UnitOfWorkDao(self.logger)
    self.managed_dao = ManagedProcessDao(self.logger)
开发者ID:eggsandbeer,项目名称:scheduler,代码行数:7,代码来源:garbage_collector_worker.py
示例3: __init__
def __init__(self, scheduler):
    """ Wire the listener to the scheduler's logger/timetable and create
    a consumer on the UOW status-report queue; thread starts later. """
    self.scheduler = scheduler
    self.logger = scheduler.logger
    self.timetable = scheduler.timetable
    self.uow_dao = UnitOfWorkDao(self.logger)
    self.consumer = Consumer(QUEUE_UOW_REPORT)
    self.main_thread = None
开发者ID:eggsandbeer,项目名称:scheduler,代码行数:7,代码来源:status_bus_listener.py
示例4: __init__
def __init__(self, logger, timetable, name):
    """ State-machine base: keeps the timetable reference, the publishers
    pool and DAO accessors for unit_of_work and job records. """
    self.name = name
    self.logger = logger
    self.timetable = timetable
    self.publishers = PublishersPool(self.logger)
    self.uow_dao = UnitOfWorkDao(self.logger)
    self.job_dao = JobDao(self.logger)
开发者ID:eggsandbeer,项目名称:scheduler,代码行数:7,代码来源:abstract_state_machine.py
示例5: FreerunStatements
class FreerunStatements(object):
    """ Collects Freerun unit_of_work records for the MX dashboard. """

    def __init__(self, logger, freerun_handlers):
        self.lock = RLock()
        self.logger = logger
        self.freerun_handlers = freerun_handlers
        self.uow_dao = UnitOfWorkDao(self.logger)

    @thread_safe
    def retrieve_records(self, timeperiod, include_running,
                         include_processed, include_noop, include_failed, include_disabled):
        """ method looks for suitable UOW records and returns them as a dict"""
        resp = dict()
        try:
            query = unit_of_work_dao.QUERY_GET_FREERUN_SINCE(timeperiod, include_running,
                                                             include_processed, include_noop, include_failed)
            uow_records = self.uow_dao.run_query(query)
            if not uow_records:
                self.logger.warning('MX: no Freerun UOW records found since {0}.'.format(timeperiod))

            for record in uow_records:
                # skip records whose process has no registered freerun handler
                handler = self.freerun_handlers.get(record.process_name)
                if handler is None:
                    continue
                # disabled processes are filtered out unless explicitly requested
                if not include_disabled and not handler.process_entry.is_on:
                    continue
                resp[record.key] = record.document
        except Exception as e:
            self.logger.error('MX Dashboard FreerunStatements error: {0}'.format(e))
        return resp
开发者ID:xiaohuangji,项目名称:scheduler,代码行数:31,代码来源:dashboard_handler.py
示例6: setUp
def setUp(self):
    """ Insert a unit_of_work fixture and prepare DAO accessors for the test. """
    self.process_name = PROCESS_ALERT_DAILY
    self.logger = get_logger(self.process_name)
    self.uow_id = str(create_and_insert_unit_of_work(self.process_name, 'range_start', 'range_end'))
    self.uow_dao = UnitOfWorkDao(self.logger)
    self.uow_log_dao = UowLogDao(self.logger)
示例7: __init__
def __init__(self, logger, timetable, name):
    """ State-machine base: timetable reference, MQ transmitter and DAO
    accessors for unit_of_work and job records. """
    self.name = name
    self.logger = logger
    self.timetable = timetable
    self.mq_transmitter = MqTransmitter(self.logger)
    self.uow_dao = UnitOfWorkDao(self.logger)
    self.job_dao = JobDao(self.logger)
示例8: UowLogHandlerUnitTest
class UowLogHandlerUnitTest(unittest.TestCase):
    """ Test flow:
    1. create a UOW in the database
    2. emulate mq message
    3. call _mq_callback method
    4. validate that all the messages are now found in the uow_log record
    5. remove UOW and uow_log record
    """

    def setUp(self):
        self.process_name = PROCESS_ALERT_DAILY
        self.logger = get_logger(self.process_name)
        self.uow_id = str(create_and_insert_unit_of_work(self.process_name, 'range_start', 'range_end'))
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.uow_log_dao = UowLogDao(self.logger)

    def tearDown(self):
        # drop both the UOW fixture and the log record it produced
        self.uow_dao.remove(self.uow_id)
        self.uow_log_dao.remove(self.uow_id)

    def test_logging(self):
        self.worker = ChattyWorker(self.process_name)
        mq_message = TestMessage(process_name=self.process_name, uow_id=self.uow_id)
        self.worker._mq_callback(mq_message)

        uow_log = self.uow_log_dao.get_one(self.uow_id)
        expected = INFO_LOG_MESSAGES + WARN_LOG_MESSAGES  # + STD_MESSAGES
        # every expected message must appear, in order, in the uow_log record
        self.assertLessEqual(len(expected), len(uow_log.log))
        for index, entry in enumerate(expected):
            self.assertIn(entry, uow_log.log[index])

    def test_exception_logging(self):
        self.worker = ExceptionWorker(self.process_name)
        mq_message = TestMessage(process_name=self.process_name, uow_id=self.uow_id)
        self.worker._mq_callback(mq_message)

        uow_log = self.uow_log_dao.get_one(self.uow_id)
        expected = ['Exception: Artificially triggered exception to test Uow Exception Logging',
                    'method ExceptionWorker._process_uow returned None. Assuming happy flow.',
                    'at INVALID_TIMEPERIOD: Success/Failure 0/0']
        for index, entry in enumerate(expected):
            self.assertIn(entry, uow_log.log[index])
示例9: __init__
def __init__(self, scheduler):
    """ Garbage collector: tracks reprocessing-candidate UOWs per process
    and re-posts them on a repeating timer. """
    self.logger = get_logger(PROCESS_GC, append_to_console=False, redirect_stdstream=False)
    self.managed_handlers = scheduler.managed_handlers
    self.timetable = scheduler.timetable
    self.lock = Lock()
    self.mq_transmitter = MqTransmitter(self.logger)
    self.uow_dao = UnitOfWorkDao(self.logger)
    # one priority queue of candidate UOWs per process name
    self.reprocess_uows = collections.defaultdict(PriorityQueue)
    self.timer = RepeatTimer(settings.settings['gc_run_interval'], self._run)
示例10: __init__
def __init__(self, logger, message, consumer, performance_ticker):
    """ Parse the MQ transmission from the message body and initialize
    thread state; the thread is named after the parsed request. """
    self.logger = logger
    self.message = message
    self.consumer = consumer
    self.performance_ticker = performance_ticker
    self.mq_request = SynergyMqTransmission.from_json(message.body)
    self.alive = False
    self.return_code = -1
    self.uow_dao = UnitOfWorkDao(self.logger)

    self.thread_name = str(self.mq_request)
    super(BashRunnable, self).__init__(name=self.thread_name)
示例11: __init__
def __init__(self, request, **values):
    """ Handler for Freerun process actions triggered from the MX console.

    A request is considered valid only when both 'process_name' and
    'entry_name' arguments are present; they are whitespace-stripped
    before use. """
    super(FreerunActionHandler, self).__init__(request, **values)
    self.process_name = self.request_arguments.get('process_name')
    self.entry_name = self.request_arguments.get('entry_name')
    self.freerun_process_dao = FreerunProcessDao(self.logger)
    self.uow_dao = UnitOfWorkDao(self.logger)

    # idiom fix: bool(...) replaces the redundant `True if ... else False`
    self.is_request_valid = bool(self.process_name and self.entry_name)
    if self.is_request_valid:
        self.process_name = self.process_name.strip()
        self.entry_name = self.entry_name.strip()

    # idiom fix: dict.get with a default replaces `'k' in d and d['k']`
    # (both yield False when the key is absent, the value otherwise)
    self.is_requested_state_on = self.request_arguments.get('is_on', False)
示例12: __init__
def __init__(self, logger):
    """ Build all process trees, register callbacks and dependencies,
    then load and validate the timetable state from the DB. """
    self.lock = RLock()
    self.logger = logger
    self.uow_dao = UnitOfWorkDao(self.logger)
    self.job_dao = JobDao(self.logger)
    self.reprocess = dict()

    # self.trees contain all of the trees and manages much of their life cycle
    # remember to enlist there all trees the system is working with
    self.trees = self._construct_trees_from_context()

    self._register_callbacks()
    self._register_dependencies()
    self.load_tree()
    self.build_trees()
    self.validate()
示例13: test_select_reprocessing_candidates
def test_select_reprocessing_candidates(self):
    """ Inserts STATE_INVALID UOWs across all time qualifiers and verifies
    that get_reprocessing_candidates returns exactly the expected deltas.

    Fixes vs original: bare `except:` narrowed to `except Exception:` so
    SystemExit/KeyboardInterrupt are not swallowed; fixture cleanup moved
    into a `finally` clause so failed assertions no longer leak UOWs. """
    logger = get_logger(PROCESS_UNIT_TEST)
    uow_dao = UnitOfWorkDao(logger)

    # baseline counts: the DB may already hold candidates from earlier runs
    try:
        initial_candidates = uow_dao.get_reprocessing_candidates()
    except Exception:
        initial_candidates = []
    try:
        initial_positive_candidates = uow_dao.get_reprocessing_candidates('2010123123')
    except Exception:
        initial_positive_candidates = []

    positive_timeperiods = {u'2010123123': PROCESS_SITE_HOURLY,   # hourly time qualifier
                            u'2010123100': PROCESS_SITE_DAILY,    # daily time qualifier
                            u'2010120000': PROCESS_SITE_MONTHLY,  # monthly time qualifier
                            u'2010000000': PROCESS_SITE_YEARLY}   # yearly time qualifier

    negative_timeperiods = {u'2009123123': PROCESS_SITE_HOURLY,   # hourly time qualifier
                            u'2009123100': PROCESS_SITE_DAILY,    # daily time qualifier
                            u'2009120000': PROCESS_SITE_MONTHLY,  # monthly time qualifier
                            u'2009000000': PROCESS_SITE_YEARLY}   # yearly time qualifier

    all_timeperiods = dict()
    all_timeperiods.update(positive_timeperiods)
    all_timeperiods.update(negative_timeperiods)

    created_uow = [create_and_insert_unit_of_work(process_name, 0, 1,
                                                  timeperiod=timeperiod,
                                                  state=unit_of_work.STATE_INVALID)
                   for timeperiod, process_name in all_timeperiods.items()]

    try:
        candidates = uow_dao.get_reprocessing_candidates('2010123123')
        self.assertEqual(len(candidates) - len(initial_positive_candidates), len(positive_timeperiods))

        candidates = uow_dao.get_reprocessing_candidates()
        self.assertEqual(len(candidates) - len(initial_candidates), len(all_timeperiods))
    finally:
        # remove fixtures even if an assertion above failed
        for uow_id in created_uow:
            uow_dao.remove(uow_id)
示例14: FreerunStatements
class FreerunStatements(object):
    """ Collects Freerun unit_of_work records for the dashboard. """

    def __init__(self, logger):
        self.lock = RLock()
        self.logger = logger
        self.uow_dao = UnitOfWorkDao(self.logger)

    @thread_safe
    def retrieve_records(self, timeperiod, unprocessed_only):
        """ method looks for suitable UOW records and returns them as a dict
        :param timeperiod: lower bound (inclusive) for the UOW search
        :param unprocessed_only: if True - restrict the query to unprocessed records
        :return: dict of {uow key: uow document}; empty on error """
        resp = dict()
        try:
            query = unit_of_work_dao.QUERY_GET_FREERUN_SINCE(timeperiod, unprocessed_only)
            records_list = self.uow_dao.run_query(query)
            if not records_list:
                # fix: logger.warn is a deprecated alias of logger.warning
                self.logger.warning('No Freerun UOW records found since {0}.'.format(timeperiod))

            for uow_record in records_list:
                resp[uow_record.key] = uow_record.document
        except Exception as e:
            self.logger.error('DashboardHandler error: {0}'.format(e))
        return resp
示例15: BashRunnable
class BashRunnable(threading.Thread):
    """Process starts remote or local bash script job, supervises its execution and updates mq"""

    def __init__(self, logger, message, consumer, performance_ticker):
        self.logger = logger
        self.message = message
        self.mq_request = SynergyMqTransmission.from_json(message.body)
        self.consumer = consumer
        self.performance_ticker = performance_ticker
        self.alive = False
        self.return_code = -1
        self.uow_dao = UnitOfWorkDao(self.logger)

        self.thread_name = str(self.mq_request)
        super(BashRunnable, self).__init__(name=self.thread_name)

    def _poll_process(self):
        """ :return: tuple (is the script still running, last known return code) """
        return self.alive, self.return_code

    def _start_process(self):
        """ Fetch the unit_of_work, mark it IN_PROGRESS and run the bash script
        via fabric; the UOW state is updated on completion or failure. """
        try:
            uow = self.uow_dao.get_one(self.mq_request.unit_of_work_id)
            if not uow.is_requested:
                # accept only UOW in STATE_REQUESTED
                # fix: logger.warn is a deprecated alias of logger.warning
                self.logger.warning('Skipping unit_of_work: id %s; state %s;' % (str(self.message.body), uow.state),
                                    exc_info=False)
                self.consumer.acknowledge(self.message.delivery_tag)
                return
        except Exception:
            self.logger.error('Safety fuse. Can not identify unit_of_work %s' % str(self.message.body), exc_info=True)
            self.consumer.acknowledge(self.message.delivery_tag)
            return

        try:
            self.logger.info('start: %s {' % self.thread_name)
            self.alive = True

            uow.state = unit_of_work.STATE_IN_PROGRESS
            uow.started_at = datetime.utcnow()
            self.uow_dao.update(uow)

            fabric.operations.env.warn_only = True
            fabric.operations.env.abort_on_prompts = True
            fabric.operations.env.use_ssh_config = True
            fabric.operations.env.host_string = uow.arguments[ARGUMENT_CMD_HOST]

            command = os.path.join(uow.arguments[ARGUMENT_CMD_PATH],
                                   uow.arguments[ARGUMENT_CMD_FILE])
            command += ' %s' % uow.arguments[ARGUMENT_CMD_ARGS]

            run_result = fabric.operations.run(command, pty=False)
            if run_result.succeeded:
                self.return_code = 0

            # NOTE(review): source formatting was flattened; assumed the UOW is
            # marked PROCESSED once the script has run, with success/failure
            # accounted separately via return_code in run() - confirm upstream
            uow.finished_at = datetime.utcnow()
            uow.state = unit_of_work.STATE_PROCESSED
            self.uow_dao.update(uow)
            self.logger.info('Completed %s with result = %r' % (self.thread_name, self.return_code))
        except Exception:
            self.logger.error('Exception on starting: %s' % self.thread_name, exc_info=True)
            uow.state = unit_of_work.STATE_INVALID
            self.uow_dao.update(uow)
        finally:
            self.logger.info('}')
            self.alive = False

    def run(self):
        """ Thread entry point: run the script, poll until it completes and
        report success/failure to the performance ticker; always ack the message. """
        try:
            self._start_process()
            code = None
            alive = True
            while alive:
                alive, code = self._poll_process()
                time.sleep(0.1)

            if code == 0:
                self.performance_ticker.tracker.increment_success()
            else:
                self.performance_ticker.tracker.increment_failure()

            self.logger.info('BashDriver for %s return code is %r' % (self.thread_name, code))
        except Exception as e:
            self.performance_ticker.tracker.increment_failure()
            self.logger.error('Safety fuse while processing request %r: %r' % (self.message.body, e), exc_info=True)
        finally:
            self.consumer.acknowledge(self.message.delivery_tag)
示例16: __init__
def __init__(self, process_name, perform_db_logging=False):
    """ UOW-aware worker base: DAO accessor, MQ transmitter and a flag
    controlling whether UOW processing is also logged into the DB. """
    super(AbstractUowAwareWorker, self).__init__(process_name)
    self.perform_db_logging = perform_db_logging
    self.mq_transmitter = MqTransmitter(self.logger)
    self.uow_dao = UnitOfWorkDao(self.logger)
示例17: AbstractUowAwareWorker
class AbstractUowAwareWorker(AbstractMqWorker):
    """ Abstract class is inherited by all workers/aggregators
    that are aware of unit_of_work and capable of processing it """

    def __init__(self, process_name, perform_db_logging=False):
        super(AbstractUowAwareWorker, self).__init__(process_name)
        # when True, UOW processing output is also recorded into the DB log
        self.perform_db_logging = perform_db_logging
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.mq_transmitter = MqTransmitter(self.logger)

    def __del__(self):
        del self.mq_transmitter
        super(AbstractUowAwareWorker, self).__del__()

    # **************** Abstract Methods ************************
    def _init_performance_tracker(self, logger):
        self.performance_tracker = UowAwareTracker(logger)
        self.performance_tracker.start()

    def _process_uow(self, uow):
        """
        :param uow: unit_of_work to process
        :return: a tuple (number of processed items/documents/etc, desired unit_of_work state) or None
        if None is returned then it is assumed that the return tuple is (0, unit_of_work.STATE_PROCESSED)
        :raise an Exception if the UOW shall be marked as STATE_INVALID
        """
        raise NotImplementedError('method _process_uow must be implemented by {0}'.format(self.__class__.__name__))

    def _clean_up(self):
        """ method is called from the *finally* clause and is suppose to clean up after the uow processing """
        pass

    def _mq_callback(self, message):
        # Phase 1: resolve the UOW referenced by the MQ message.
        # Any failure here acks the message and bails out - "safety fuse".
        try:
            mq_request = MqTransmission.from_json(message.body)
            uow = self.uow_dao.get_one(mq_request.record_db_id)
            if not uow.is_requested:
                # accept only UOW in STATE_REQUESTED
                self.logger.warning('Skipping UOW: id {0}; state {1};'.format(message.body, uow.state),
                                    exc_info=False)
                self.consumer.acknowledge(message.delivery_tag)
                return
        except Exception:
            self.logger.error('Safety fuse. Can not identify UOW {0}'.format(message.body), exc_info=True)
            self.consumer.acknowledge(message.delivery_tag)
            return

        # Phase 2: mark IN_PROGRESS, delegate to _process_uow and record the outcome.
        db_log_handler = UowLogHandler(self.logger, uow.db_id)
        try:
            uow.state = unit_of_work.STATE_IN_PROGRESS
            uow.started_at = datetime.utcnow()
            self.uow_dao.update(uow)
            self.performance_tracker.start_uow(uow)

            if self.perform_db_logging:
                db_log_handler.attach()

            result = self._process_uow(uow)
            if result is None:
                # None from _process_uow is treated as (0, STATE_PROCESSED)
                self.logger.warning('method {0}._process_uow returned None. Assuming happy flow.'
                                    .format(self.__class__.__name__))
                number_of_aggregated_objects, target_state = 0, unit_of_work.STATE_PROCESSED
            else:
                number_of_aggregated_objects, target_state = result

            uow.number_of_aggregated_documents = number_of_aggregated_objects
            uow.number_of_processed_documents = self.performance_tracker.success_per_job
            uow.finished_at = datetime.utcnow()
            uow.state = target_state
            self.uow_dao.update(uow)

            if uow.is_finished:
                self.performance_tracker.finish_uow()
            else:
                self.performance_tracker.cancel_uow()
        except Exception as e:
            # re-read the UOW: MX may have canceled it while we were processing,
            # in which case no state update is performed
            fresh_uow = self.uow_dao.get_one(mq_request.record_db_id)
            self.performance_tracker.cancel_uow()
            if fresh_uow.is_canceled:
                self.logger.warning('UOW {0} for {1}@{2} was likely marked by MX as SKIPPED. No UOW update performed.'
                                    .format(uow.db_id, uow.process_name, uow.timeperiod), exc_info=False)
            else:
                self.logger.error('Safety fuse while processing UOW {0} for {1}@{2}: {3}'
                                  .format(uow.db_id, uow.process_name, uow.timeperiod, e), exc_info=True)
                uow.state = unit_of_work.STATE_INVALID
                self.uow_dao.update(uow)
        finally:
            self.consumer.acknowledge(message.delivery_tag)
            self.consumer.close()
            self._clean_up()
            db_log_handler.detach()

        # Phase 3: best-effort status-report publishing; failures are only logged
        try:
            self.mq_transmitter.publish_uow_status(uow)
            self.logger.info('UOW *{0}* status report published into MQ'.format(uow.state))
        except Exception:
            self.logger.error('Error on UOW status report publishing', exc_info=True)
示例18: __init__
def __init__(self, logger, name=STATE_MACHINE_FREERUN):
    """ Freerun state machine: DAO accessors plus an MQ transmitter. """
    self.name = name
    self.logger = logger
    self.uow_dao = UnitOfWorkDao(self.logger)
    self.sfe_dao = FreerunProcessDao(self.logger)
    self.mq_transmitter = MqTransmitter(self.logger)
示例19: AbstractStateMachine
class AbstractStateMachine(object):
""" Abstract state machine used to govern all processes and their states """
def __init__(self, logger, timetable, name):
    """ Keep the timetable reference, the publishers pool and DAO
    accessors for unit_of_work and job records. """
    self.name = name
    self.logger = logger
    self.timetable = timetable
    self.publishers = PublishersPool(self.logger)
    self.uow_dao = UnitOfWorkDao(self.logger)
    self.job_dao = JobDao(self.logger)
def __del__(self):
    """ Close the Flopsy publishers pool on destruction; errors are logged, never raised. """
    try:
        self.logger.info('Closing Flopsy Publishers Pool...')
        self.publishers.close()
    except Exception as e:
        err_msg = 'Exception caught while closing Flopsy Publishers Pool: %s' % str(e)
        self.logger.error(err_msg)
def _log_message(self, level, process_name, timeperiod, msg):
    """ method performs logging into log file and Timetable's tree node
    :param level: logging level (e.g. INFO, WARNING)
    :param process_name: identifies the Timetable tree to annotate
    :param timeperiod: identifies the tree node to annotate
    :param msg: the message to record in both destinations """
    self.timetable.add_log_entry(process_name, timeperiod, msg)
    self.logger.log(level, msg)
@with_reconnect
def _insert_uow(self, process_name, start_timeperiod, end_timeperiod, start_id, end_id):
    """creates unit_of_work and inserts it into the DB
    :raise DuplicateKeyError: if unit_of_work with given parameters already exists """
    process_entry = context.process_context[process_name]

    new_uow = UnitOfWork()
    new_uow.process_name = process_name
    new_uow.timeperiod = start_timeperiod
    new_uow.start_timeperiod = start_timeperiod
    new_uow.end_timeperiod = end_timeperiod
    new_uow.start_id = str(start_id)
    new_uow.end_id = str(end_id)
    new_uow.created_at = datetime.utcnow()
    new_uow.source = process_entry.source
    new_uow.sink = process_entry.sink
    new_uow.state = unit_of_work.STATE_REQUESTED
    new_uow.unit_of_work_type = TYPE_MANAGED
    new_uow.number_of_retries = 0
    new_uow.arguments = process_entry.arguments
    new_uow.db_id = self.uow_dao.insert(new_uow)

    msg = 'Created: UOW %s for %s in timeperiod [%s:%s).' \
          % (new_uow.db_id, process_name, start_timeperiod, end_timeperiod)
    self._log_message(INFO, process_name, start_timeperiod, msg)
    return new_uow
def _publish_uow(self, uow):
    """ Publish the UOW id onto the process's MQ queue so a worker can pick it up. """
    mq_request = SynergyMqTransmission(process_name=uow.process_name, unit_of_work_id=uow.db_id)

    publisher = self.publishers.get(uow.process_name)
    publisher.publish(mq_request.document)
    publisher.release()

    msg = 'Published: UOW %r for %r in timeperiod %r.' % (uow.db_id, uow.process_name, uow.start_timeperiod)
    self._log_message(INFO, uow.process_name, uow.start_timeperiod, msg)
def insert_and_publish_uow(self, process_name, start_timeperiod, end_timeperiod, start_id, end_id):
    """ method creates and publishes a unit_of_work. it also handles DuplicateKeyError and attempts recovery
    :return: tuple (uow, is_duplicate)
    :raise UserWarning: if the recovery from DuplicateKeyError was unsuccessful
    """
    is_duplicate = False
    try:
        uow = self._insert_uow(process_name, start_timeperiod, end_timeperiod, start_id, end_id)
    except DuplicateKeyError as e:
        # a UOW with these parameters already exists: recover it from the
        # DuplicateKeyError rather than failing the scheduling cycle
        is_duplicate = True
        msg = 'Catching up with latest unit_of_work %s in timeperiod %s, because of: %r' \
              % (process_name, start_timeperiod, e)
        self._log_message(WARNING, process_name, start_timeperiod, msg)
        uow = self.uow_dao.recover_from_duplicatekeyerror(e)

    if uow is None:
        # recovery failed - escalate to a human operator
        msg = 'MANUAL INTERVENTION REQUIRED! Unable to locate unit_of_work for %s in %s' \
              % (process_name, start_timeperiod)
        self._log_message(WARNING, process_name, start_timeperiod, msg)
        raise UserWarning(msg)

    # publish the created/caught up unit_of_work
    self._publish_uow(uow)
    return uow, is_duplicate
def shallow_state_update(self, uow):
    """ method does not trigger any new actions
    if applicable, it will update job_record state and Timetable tree node state
    :assumptions: uow is either in STATE_CANCELED or STATE_PROCESSED """
    # no-op in the abstract base; concrete state machines override as needed
    pass

def _process_state_embryo(self, job_record):
    """ method that takes care of processing job records in STATE_EMBRYO state"""
    # no-op in the abstract base; concrete state machines override as needed
    pass

def _process_state_in_progress(self, job_record):
    """ method that takes care of processing job records in STATE_IN_PROGRESS state"""
    # no-op in the abstract base; concrete state machines override as needed
    pass

def _process_state_final_run(self, job_record):
    """method takes care of processing job records in STATE_FINAL_RUN state"""
    # no-op in the abstract base; concrete state machines override as needed
    pass
#.........这里部分代码省略.........
开发者ID:eggsandbeer,项目名称:scheduler,代码行数:101,代码来源:abstract_state_machine.py
示例20: __init__
def __init__(self, request, **values):
    """ Base action handler: exposes UOW and log-recording DAO accessors. """
    super(AbstractActionHandler, self).__init__(request, **values)
    self.log_recording_dao = LogRecordingDao(self.logger)
    self.uow_dao = UnitOfWorkDao(self.logger)
注:本文中的synergy.db.dao.unit_of_work_dao.UnitOfWorkDao类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论