本文整理汇总了Python中mo_threads.Thread类的典型用法代码示例。如果您正苦于以下问题:Python Thread类的具体用法?Python Thread怎么用?Python Thread使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Thread类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self, name):
    """
    Start a rate logger that tracks request rate under `name`.
    A background daemon thread does the periodic logging.
    """
    self.name = name
    self.lock = Lock("rate locker")  # guards request_rate / last_request updates
    self.request_rate = 0.0
    self.last_request = Date.now()
    # start last: the daemon reads the fields initialized above
    Thread.run("rate logger", self._daemon)
开发者ID:rv404674,项目名称:TUID,代码行数:7,代码来源:rate_logger.py
示例2: __init__
def __init__(self, rate=None, amortization_period=None, source=None, database=None, kwargs=None):
    """
    Rate-limited, sqlite-backed cache in front of an upstream HTTP source.

    :param rate: max requests per second to the upstream (default HG_REQUEST_PER_SECOND)
    :param amortization_period: window over which `rate` is averaged
    :param source: object with a `.url` for the upstream service
    :param database: sqlite database settings/path
    """
    self.amortization_period = coalesce(amortization_period, AMORTIZATION_PERIOD)
    self.rate = coalesce(rate, HG_REQUEST_PER_SECOND)
    self.cache_locker = Lock()
    self.cache = {}  # MAP FROM url TO (ready, headers, response, timestamp) PAIR
    self.no_cache = {}  # VERY SHORT TERM CACHE
    self.workers = []
    self.todo = Queue(APP_NAME+" todo")
    # bounded so that at most rate*period requests can be in flight per window
    self.requests = Queue(APP_NAME + " requests", max=int(self.rate * self.amortization_period.seconds))
    self.url = URL(source.url)
    self.db = Sqlite(database)
    self.inbound_rate = RateLogger("Inbound")
    self.outbound_rate = RateLogger("hg.mo")

    # create the cache table on first use (empty database has no tables)
    if not self.db.query("SELECT name FROM sqlite_master WHERE type='table'").data:
        with self.db.transaction() as t:
            t.execute(
                "CREATE TABLE cache ("
                " path TEXT PRIMARY KEY, "
                " headers TEXT, "
                " response TEXT, "
                " timestamp REAL "
                ")"
            )

    # worker threads plus one rate limiter and one cache cleaner daemon
    self.threads = [
        Thread.run(APP_NAME+" worker" + text_type(i), self._worker)
        for i in range(CONCURRENCY)
    ]
    self.limiter = Thread.run(APP_NAME+" limiter", self._rate_limiter)
    self.cleaner = Thread.run(APP_NAME+" cleaner", self._cache_cleaner)
开发者ID:rv404674,项目名称:TUID,代码行数:31,代码来源:cache.py
示例3: __init__
def __init__(self, host, index, alias=None, name=None, port=9200, kwargs=None):
    """
    Elasticsearch metadata container (appears to be a singleton: a second
    call returns immediately once `settings` exists).
    """
    global _elasticsearch
    # already initialized; do not rebuild state or restart the worker
    if hasattr(self, "settings"):
        return

    # late imports — presumably to avoid circular imports at module load
    from pyLibrary.queries.containers.list_usingPythonList import ListContainer
    from pyLibrary.env import elasticsearch as _elasticsearch

    self.settings = kwargs
    self.default_name = coalesce(name, alias, index)
    self.default_es = _elasticsearch.Cluster(kwargs=kwargs)
    self.todo = Queue("refresh metadata", max=100000, unique=True)
    self.es_metadata = Null
    # pretend metadata is already stale so the first refresh happens promptly
    self.last_es_metadata = Date.now()-OLD_METADATA

    self.meta=Data()
    table_columns = metadata_tables()
    column_columns = metadata_columns()
    self.meta.tables = ListContainer("meta.tables", [], wrap({c.names["."]: c for c in table_columns}))
    self.meta.columns = ColumnList()
    self.meta.columns.insert(column_columns)
    self.meta.columns.insert(table_columns)
    # TODO: fix monitor so it does not bring down ES
    if ENABLE_META_SCAN:
        self.worker = Thread.run("refresh metadata", self.monitor)
    else:
        self.worker = Thread.run("refresh metadata", self.not_monitor)
    return
开发者ID:klahnakoski,项目名称:SpotManager,代码行数:29,代码来源:meta.py
示例4: capture_termination_signal
def capture_termination_signal(please_stop):
    """
    WILL SIGNAL please_stop WHEN THIS AWS INSTANCE IS DUE FOR SHUTDOWN

    Polls the EC2 instance metadata endpoint for a spot termination notice.
    """
    def worker(please_stop):
        seen_problem = False
        while not please_stop:
            request_time = (time.time() - timer.START)/60  # MINUTES
            try:
                response = requests.get("http://169.254.169.254/latest/meta-data/spot/termination-time")
                seen_problem = False
                # 400/404 mean "no termination scheduled"; anything else is the notice
                if response.status_code not in [400, 404]:
                    Log.alert("Shutdown AWS Spot Node {{name}} {{type}}", name=machine_metadata.name, type=machine_metadata.aws_instance_type)
                    please_stop.go()
            except Exception as e:
                e = Except.wrap(e)
                if "Failed to establish a new connection: [Errno 10060]" in e or "A socket operation was attempted to an unreachable network" in e:
                    # metadata endpoint unreachable => not an AWS spot node; stop polling
                    Log.note("AWS Spot Detection has shutdown, probably not a spot node, (http://169.254.169.254 is unreachable)")
                    return
                elif seen_problem:
                    # IGNORE THE FIRST PROBLEM
                    Log.warning("AWS shutdown detection has more than one consecutive problem: (last request {{time|round(1)}} minutes since startup)", time=request_time, cause=e)
                seen_problem = True
                # back off longer after a failure
                (Till(seconds=61) | please_stop).wait()
            (Till(seconds=11) | please_stop).wait()
    Thread.run("listen for termination", worker)
开发者ID:klahnakoski,项目名称:pyLibrary,代码行数:28,代码来源:__init__.py
示例5: queue_consumer
def queue_consumer(pull_queue, please_stop=None):
    """
    Pull requests from an AWS SQS queue and replay each one on its own thread,
    preserving the original inter-request timing (via `time_offset`).
    """
    queue = aws.Queue(pull_queue)
    time_offset = None  # wall-clock offset from the recorded request times
    request_count = 0
    while not please_stop:
        request = queue.pop(till=please_stop)
        if please_stop:
            break
        if not request:
            Log.note("Nothing in queue, pausing for 5 seconds...")
            (please_stop | Till(seconds=5)).wait()
            continue
        if SKIP_TRY_REQUESTS and 'try' in request.where['and'].eq.branch:
            Log.note("Skipping try revision.")
            queue.commit()
            continue
        now = Date.now().unix
        if time_offset is None:
            # anchor replay clock on the first request seen
            time_offset = now - request.meta.request_time
        next_request = request.meta.request_time + time_offset
        if next_request > now:
            # sleep until the replayed schedule says this request is due
            Log.note("Next request in {{wait_time}}", wait_time=Duration(seconds=next_request - now))
            Till(till=next_request).wait()
        Thread.run("request "+text_type(request_count), one_request, request)
        request_count += 1
        queue.commit()
开发者ID:rv404674,项目名称:TUID,代码行数:31,代码来源:sqs_consumer.py
示例6: inners
def inners():
    """
    Generator over hits, attaching each inner hit's _source as t._inner and
    evaluating post-expressions onto the record before yielding.
    NOTE(review): indentation was lost in transit; this nesting (yield once
    per inner hit) is the most plausible reconstruction — confirm upstream.
    """
    for t in data.hits.hits:
        for i in t.inner_hits[literal_field(query_path)].hits.hits:
            t._inner = i._source
            for k, e in post_expressions.items():
                t[k] = e(t)
            yield t
    # a second query was issued in parallel; wait for it, then drain its hits
    if more_filter:
        Thread.join(need_more)
        for t in more[0].hits.hits:
            yield t
开发者ID:rv404674,项目名称:TUID,代码行数:11,代码来源:deep.py
示例7: __exit__
def __exit__(self, exc_type, exc_val, exc_tb):
    """
    Shut down the pulse listener: signal stop, close the target queue,
    disconnect from pulse (best effort), then run Thread.__exit__.
    """
    Log.note("clean pulse exit")
    self.please_stop.go()
    # best effort: closing an already-closed queue should not abort the exit
    # NOTE(review): whitespace was lost in transit; the note is assumed to be
    # inside the suppress block — confirm against the original pulse.py
    with suppress_exception:
        self.target_queue.close()
        Log.note("stop put into queue")
    try:
        self.pulse.disconnect()
    except Exception as e:
        Log.warning("Can not disconnect during pulse exit, ignoring", e)
    Thread.__exit__(self, exc_type, exc_val, exc_tb)
开发者ID:rv404674,项目名称:TUID,代码行数:12,代码来源:pulse.py
示例8: StructuredLogger_usingThreadedStream
class StructuredLogger_usingThreadedStream(StructuredLogger):
    # stream CAN BE AN OBJECT WITH write() METHOD, OR A STRING
    # WHICH WILL eval() TO ONE
    def __init__(self, stream):
        assert stream
        if isinstance(stream, text_type):
            name = stream
            # SECURITY NOTE: eval() of the stream name — only safe because the
            # string comes from trusted configuration, never external input
            stream = self.stream = eval(stream)
            if name.startswith("sys.") and PY3:
                # PY3 std streams want str; wrap to decode the utf8 bytes we queue
                self.stream = Data(write=lambda d: stream.write(d.decode('utf8')))
        else:
            name = "stream"
            self.stream = stream

        # WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
        from mo_threads import Queue

        def utf8_appender(value):
            # everything is written to the stream as utf8-encoded bytes
            if isinstance(value, text_type):
                value = value.encode('utf8')
            self.stream.write(value)

        appender = utf8_appender

        self.queue = Queue("queue for " + self.__class__.__name__ + "(" + name + ")", max=10000, silent=True)
        self.thread = Thread("log to " + self.__class__.__name__ + "(" + name + ")", time_delta_pusher, appender=appender, queue=self.queue, interval=0.3)
        self.thread.parent.remove_child(self.thread)  # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
        self.thread.start()

    def write(self, template, params):
        """Enqueue one log record; the background thread does the writing."""
        try:
            self.queue.add({"template": template, "params": params})
            return self
        except Exception as e:
            raise e  # OH NO!

    def stop(self):
        """Flush and stop the writer thread, then close the queue."""
        try:
            self.queue.add(THREAD_STOP)  # BE PATIENT, LET REST OF MESSAGE BE SENT
            self.thread.join()
        except Exception as e:
            if DEBUG_LOGGING:
                raise e
        try:
            self.queue.close()
        except Exception as f:
            if DEBUG_LOGGING:
                raise f
开发者ID:rv404674,项目名称:TUID,代码行数:50,代码来源:log_usingThreadedStream.py
示例9: __init__
def __init__(self):
    """
    Stats logger: counters for files/tuids/requests/memory, each guarded by
    its own lock, with four daemon threads reporting them.
    """
    self.out_of_memory_restart = False

    # totals
    self.total_locker = Lock()
    self.total_files_requested = 0
    self.total_tuids_mapped = 0

    # thread-wait accounting
    self.threads_locker = Lock()
    self.waiting = 0
    self.threads_waiting = 0

    # request outcome accounting
    self.requests_locker = Lock()
    self.requests_total = 0
    self.requests_complete = 0
    self.requests_incomplete = 0
    self.requests_passed = 0
    self.requests_failed = 0

    # memory tracking
    self.prev_mem = 0
    self.curr_mem = 0
    self.initial_growth = {}

    # start daemons last: they read the fields initialized above
    Thread.run("pc-daemon", self.run_pc_daemon)
    Thread.run("threads-daemon", self.run_threads_daemon)
    Thread.run("memory-daemon", self.run_memory_daemon)
    Thread.run("requests-daemon", self.run_requests_daemon)
开发者ID:rv404674,项目名称:TUID,代码行数:26,代码来源:statslogger.py
示例10: StructuredLogger_usingThread
class StructuredLogger_usingThread(StructuredLogger):
    """
    Wrap another StructuredLogger so writes are queued and performed on a
    background thread, keeping the caller fast.
    """

    def __init__(self, logger):
        if not isinstance(logger, StructuredLogger):
            Log.error("Expecting a StructuredLogger")
        # allow_add_after_close: late log lines must not raise during shutdown
        self.queue = Queue("Queue for " + self.__class__.__name__, max=10000, silent=True, allow_add_after_close=True)
        self.logger = logger

        def worker(logger, please_stop):
            try:
                while not please_stop:
                    logs = self.queue.pop_all()
                    if not logs:
                        # nothing queued; poll again in a second (or on stop)
                        (Till(seconds=1) | please_stop).wait()
                        continue
                    for log in logs:
                        if log is THREAD_STOP:
                            please_stop.go()
                        else:
                            logger.write(**log)
            except Exception as e:
                # plain print: the logging machinery itself just failed
                print("problem in " + StructuredLogger_usingThread.__name__ + ": " + str(e))
            finally:
                Log.note("stop the child")
                logger.stop()

        self.thread = Thread("Thread for " + self.__class__.__name__, worker, logger)
        self.thread.parent.remove_child(self.thread)  # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
        self.thread.start()

    def write(self, template, params):
        """Enqueue one log record; the worker thread delivers it."""
        try:
            self.queue.add({"template": template, "params": params})
            return self
        except Exception as e:
            e = Except.wrap(e)
            raise e  # OH NO!

    def stop(self):
        """Send the stop sentinel, wait for the worker, then close the queue."""
        try:
            self.queue.add(THREAD_STOP)  # BE PATIENT, LET REST OF MESSAGE BE SENT
            self.thread.join()
            Log.note("joined on thread")
        except Exception as e:
            Log.note("problem in threaded logger" + str(e))
        with suppress_exception:
            self.queue.close()
开发者ID:klahnakoski,项目名称:pyLibrary,代码行数:49,代码来源:log_usingThread.py
示例11: __init__
def __init__(self, name):
    """
    Column metadata table persisted to a per-name sqlite file, with a
    background thread applying queued inserts/updates.
    """
    Table.__init__(self, "meta.columns")
    self.db_file = File("metadata." + name + ".sqlite")
    self.data = {}  # MAP FROM ES_INDEX TO (abs_column_name to COLUMNS)
    self.locker = Lock()
    self._schema = None
    # check_same_thread=False: the db connection is shared with the worker thread
    self.db = sqlite3.connect(
        database=self.db_file.abspath, check_same_thread=False, isolation_level=None
    )
    self.last_load = Null
    self.todo = Queue(
        "update columns to db"
    )  # HOLD (action, column) PAIR, WHERE action in ['insert', 'update']
    self._db_load()
    # start worker after load so it sees a populated self.data
    Thread.run("update " + name, self._db_worker)
开发者ID:klahnakoski,项目名称:pyLibrary,代码行数:15,代码来源:meta.py
示例12: __init__
def __init__(self, name, config):
    """
    Spawn a python_worker subprocess and the two threads that service its
    stdout (responses) and stderr (errors).
    """
    config = wrap(config)
    # the child inherits our logging; configuring its own would conflict
    if config.debug.logs:
        Log.error("not allowed to configure logging on other process")

    self.process = Process(name, [PYTHON, "mo_threads" + os.sep + "python_worker.py"], shell=True)
    # first message: push our (trace-enabled) settings to the child
    self.process.stdin.add(value2json(set_default({"debug": {"trace": True}}, config)))

    self.lock = Lock("wait for response from "+name)
    self.current_task = None
    self.current_response = None
    self.current_error = None
    self.daemon = Thread.run("", self._daemon)
    self.errors = Thread.run("", self._stderr)
开发者ID:klahnakoski,项目名称:pyLibrary,代码行数:15,代码来源:python.py
示例13: __init__
def __init__(self, stream):
    """
    Accept a writable object, or a string that eval()s to one, and start a
    background thread that flushes queued log lines to it.
    """
    assert stream
    if isinstance(stream, text_type):
        name = stream
        # SECURITY NOTE: eval() of the stream name — only safe for trusted config
        stream = self.stream = eval(stream)
        if name.startswith("sys.") and PY3:
            # PY3 std streams want str; wrap to decode the utf8 bytes we queue
            self.stream = Data(write=lambda d: stream.write(d.decode('utf8')))
    else:
        name = "stream"
        self.stream = stream

    # WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
    from mo_threads import Queue

    def utf8_appender(value):
        # everything is written as utf8-encoded bytes
        if isinstance(value, text_type):
            value = value.encode('utf8')
        self.stream.write(value)

    appender = utf8_appender

    self.queue = Queue("queue for " + self.__class__.__name__ + "(" + name + ")", max=10000, silent=True)
    self.thread = Thread("log to " + self.__class__.__name__ + "(" + name + ")", time_delta_pusher, appender=appender, queue=self.queue, interval=0.3)
    self.thread.parent.remove_child(self.thread)  # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
    self.thread.start()
开发者ID:rv404674,项目名称:TUID,代码行数:26,代码来源:log_usingThreadedStream.py
示例14: __init__
def __init__(
    self,
    host,
    index,
    port=9200,
    type="log",
    queue_size=1000,
    batch_size=100,
    kwargs=None,
):
    """
    settings ARE FOR THE ELASTICSEARCH INDEX

    Structured logger that batches records into an Elasticsearch index via a
    bounded queue and an insert-loop worker thread.
    """
    # normalize retry/timeout settings before handing kwargs to the Cluster
    kwargs.timeout = Duration(coalesce(kwargs.timeout, "30second")).seconds
    kwargs.retry.times = coalesce(kwargs.retry.times, 3)
    kwargs.retry.sleep = Duration(coalesce(kwargs.retry.sleep, MINUTE)).seconds
    # pick one host at random when a list is given
    kwargs.host = Random.sample(listwrap(host), 1)[0]

    schema = json2value(value2json(SCHEMA), leaves=True)
    schema.mappings[type].properties["~N~"].type = "nested"
    self.es = Cluster(kwargs).get_or_create_index(
        schema=schema,
        limit_replicas=True,
        typed=True,
        kwargs=kwargs,
    )
    self.batch_size = batch_size
    self.es.add_alias(coalesce(kwargs.alias, kwargs.index))
    self.queue = Queue("debug logs to es", max=queue_size, silent=True)

    self.worker = Thread.run("add debug logs to es", self._insert_loop)
开发者ID:klahnakoski,项目名称:pyLibrary,代码行数:31,代码来源:log_usingElasticSearch.py
示例15: __init__
def __init__(self, host, index, port=9200, type="log", max_size=1000, batch_size=100, kwargs=None):
    """
    settings ARE FOR THE ELASTICSEARCH INDEX

    Older variant of the ES structured logger: create/attach the index,
    queue records, and push them from a background insert loop.
    """
    self.es = Cluster(kwargs).get_or_create_index(
        schema=mo_json.json2value(value2json(SCHEMA), leaves=True),
        limit_replicas=True,
        tjson=True,
        kwargs=kwargs
    )
    self.batch_size = batch_size
    self.es.add_alias(coalesce(kwargs.alias, kwargs.index))
    self.queue = Queue("debug logs to es", max=max_size, silent=True)

    # retry defaults applied after index creation
    # NOTE(review): unlike the sibling logger, sleep is kept as a Duration
    # (no .seconds) — confirm downstream expects a Duration here
    self.es.settings.retry.times = coalesce(self.es.settings.retry.times, 3)
    self.es.settings.retry.sleep = Duration(coalesce(self.es.settings.retry.sleep, MINUTE))

    Thread.run("add debug logs to es", self._insert_loop)
开发者ID:klahnakoski,项目名称:SpotManager,代码行数:16,代码来源:log_usingElasticSearch.py
示例16: __init__
def __init__(self, logger):
    """
    Wrap `logger` so writes are queued and delivered by a worker thread.
    """
    if not isinstance(logger, StructuredLogger):
        Log.error("Expecting a StructuredLogger")
    # allow_add_after_close: late log lines must not raise during shutdown
    self.queue = Queue("Queue for " + self.__class__.__name__, max=10000, silent=True, allow_add_after_close=True)
    self.logger = logger

    def worker(logger, please_stop):
        try:
            while not please_stop:
                logs = self.queue.pop_all()
                if not logs:
                    # nothing queued; poll again in a second (or on stop)
                    (Till(seconds=1) | please_stop).wait()
                    continue
                for log in logs:
                    if log is THREAD_STOP:
                        please_stop.go()
                    else:
                        logger.write(**log)
        except Exception as e:
            # plain print: the logging machinery itself just failed
            print("problem in " + StructuredLogger_usingThread.__name__ + ": " + str(e))
        finally:
            Log.note("stop the child")
            logger.stop()

    self.thread = Thread("Thread for " + self.__class__.__name__, worker, logger)
    self.thread.parent.remove_child(self.thread)  # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
    self.thread.start()
开发者ID:klahnakoski,项目名称:pyLibrary,代码行数:28,代码来源:log_usingThread.py
示例17: setup
def setup(
    self,
    instance,  # THE boto INSTANCE OBJECT FOR THE MACHINE TO SETUP
    utility  # THE utility OBJECT FOUND IN CONFIG
):
    """
    Provision an EC2 spot instance (packages, etl code, grcov, config,
    supervisor) on a worker thread, bounded by settings.setup_timeout.
    """
    with self.locker:
        if not self.settings.setup_timeout:
            Log.error("expecting instance.setup_timeout to prevent setup from locking")

        def worker(please_stop):
            cpu_count = int(round(utility.cpu))

            # NOTE(review): whitespace was lost in transit; hide('output') is
            # assumed to wrap only the fabric configuration — confirm upstream
            with hide('output'):
                Log.note("setup {{instance}}", instance=instance.id)
                self._config_fabric(instance)

            Log.note("update packages on {{instance}} ip={{ip}}", instance=instance.id, ip=instance.ip_address)
            try:
                self._update_ubuntu_packages()
            except Exception as e:
                # package update failure aborts the whole setup for this instance
                Log.warning("Can not setup {{instance}}, type={{type}}", instance=instance.id, type=instance.instance_type, cause=e)
                return

            Log.note("setup etl on {{instance}}", instance=instance.id)
            self._setup_etl_code()
            Log.note("setup grcov on {{instance}}", instance=instance.id)
            self._setup_grcov()
            Log.note("add config file on {{instance}}", instance=instance.id)
            self._add_private_file()
            Log.note("setup supervisor on {{instance}}", instance=instance.id)
            self._setup_etl_supervisor(cpu_count)
            Log.note("setup done {{instance}}", instance=instance.id)

        worker_thread = Thread.run("etl setup started at "+unicode(Date.now().format()), worker)
        # wait for either completion or the configured timeout
        (Till(timeout=Duration(self.settings.setup_timeout).seconds) | worker_thread.stopped).wait()
        if not worker_thread.stopped:
            # timed out: treat as critical failure
            Log.error("critical failure in thread {{name|quote}}", name=worker_thread.name)
        worker_thread.join()
开发者ID:klahnakoski,项目名称:SpotManager,代码行数:35,代码来源:etl.py
示例18: _find_revision
def _find_revision(self, revision):
    """
    Search a few well-known branches for `revision` using three parallel
    worker threads; return the list of Revision objects found.
    """
    # NOTE(review): please_stop is a plain False here (not a Signal), so the
    # workers' `if please_stop` check can never trigger — confirm intent
    please_stop = False
    locker = Lock()  # guards `output`
    output = []
    queue = Queue("branches", max=2000)
    queue.extend(b for b in self.branches if b.locale == DEFAULT_LOCALE and b.name in ["try", "mozilla-inbound", "autoland"])
    queue.add(THREAD_STOP)  # sentinel so each worker's iteration terminates
    problems = []

    def _find(please_stop):
        for b in queue:
            if please_stop:
                return
            try:
                url = b.url + "json-info?node=" + revision
                rev = self.get_revision(Revision(branch=b, changeset={"id": revision}))
                with locker:
                    output.append(rev)
                Log.note("Revision found at {{url}}", url=url)
            except Exception as f:
                # collect failures; a miss on one branch is not fatal
                problems.append(f)

    threads = []
    for i in range(3):
        threads.append(Thread.run("find changeset " + text_type(i), _find, please_stop=please_stop))
    for t in threads:
        with assert_no_exception:
            t.join()
    return output
开发者ID:rv404674,项目名称:TUID,代码行数:31,代码来源:hg_mozilla_org.py
示例19: __init__
def __init__(self, host, index, sql_file='metadata.sqlite', alias=None, name=None, port=9200, kwargs=None):
    """
    Elasticsearch metadata snowflake (appears to be a singleton: returns
    immediately once `settings` exists), with a background refresh worker.
    """
    # already initialized; do not rebuild state or restart the worker
    if hasattr(self, "settings"):
        return

    self.too_old = TOO_OLD
    self.settings = kwargs
    self.default_name = coalesce(name, alias, index)
    self.es_cluster = elasticsearch.Cluster(kwargs=kwargs)

    self.index_does_not_exist = set()
    self.todo = Queue("refresh metadata", max=100000, unique=True)

    self.index_to_alias = Relation_usingList()

    self.es_metadata = Null
    # pretend metadata is already stale so the first refresh happens promptly
    self.metadata_last_updated = Date.now() - OLD_METADATA

    self.meta = Data()
    self.meta.columns = ColumnList()

    self.alias_to_query_paths = {
        "meta.columns": [['.']],
        "meta.tables": [['.']]
    }
    self.alias_last_updated = {
        "meta.columns": Date.now(),
        "meta.tables": Date.now()
    }
    table_columns = metadata_tables()
    self.meta.tables = ListContainer(
        "meta.tables",
        [
            # TableDesc("meta.columns", None, ".", Date.now()),
            # TableDesc("meta.tables", None, ".", Date.now())
        ],
        jx_base.Schema(".", table_columns)
    )
    self.meta.columns.extend(table_columns)
    # TODO: fix monitor so it does not bring down ES
    if ENABLE_META_SCAN:
        self.worker = Thread.run("refresh metadata", self.monitor)
    else:
        self.worker = Thread.run("refresh metadata", self.not_monitor)
    return
开发者ID:rv404674,项目名称:TUID,代码行数:44,代码来源:meta.py
示例20: __init__
def __init__(
    self,
    hg=None,        # CONNECT TO hg
    repo=None,      # CONNECTION INFO FOR ES CACHE
    branches=None,  # CONNECTION INFO FOR ES CACHE
    use_cache=False,  # True IF WE WILL USE THE ES FOR DOWNLOADING BRANCHES
    timeout=30 * SECOND,
    kwargs=None
):
    """
    hg.mozilla.org client with an optional Elasticsearch-backed cache and a
    background daemon servicing the todo queue.
    """
    if not _hg_branches:
        _late_imports()

    self.es_locker = Lock()
    self.todo = mo_threads.Queue("todo for hg daemon", max=DAEMON_QUEUE_SIZE)

    self.settings = kwargs
    self.timeout = Duration(timeout)

    # VERIFY CONNECTIVITY
    with Explanation("Test connect with hg"):
        response = http.head(self.settings.hg.url)

    if branches == None:
        # no cached branch info: fetch branches directly, run without ES
        self.branches = _hg_branches.get_branches(kwargs=kwargs)
        self.es = None
        return

    self.last_cache_miss = Date.now()
    set_default(repo, {"schema": revision_schema})
    self.es = elasticsearch.Cluster(kwargs=repo).get_or_create_index(kwargs=repo)

    def setup_es(please_stop):
        # best-effort index housekeeping; failures here are non-fatal
        with suppress_exception:
            self.es.add_alias()

        with suppress_exception:
            self.es.set_refresh_interval(seconds=1)

    Thread.run("setup_es", setup_es)
    self.branches = _hg_branches.get_branches(kwargs=kwargs)
    # NOTE(review): overwrites the Duration set above with the raw timeout
    # value — confirm which type downstream code expects
    self.timeout = timeout

    Thread.run("hg daemon", self._daemon)
开发者ID:rv404674,项目名称:TUID,代码行数:43,代码来源:hg_mozilla_org.py
注:本文中的mo_threads.Thread类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论