This article collects typical usage examples of the Python Thread class from pyLibrary.thread.threads. If you have been wondering what the Thread class is for, how to use it, or what it looks like in practice, the curated examples below may help.
Twenty code examples of the Thread class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python samples.
Example 1: worker
def worker(please_stop):
    pending = Queue("pending ids", max=BATCH_SIZE*3, silent=False)

    pending_thread = Thread.run(
        "get pending",
        get_pending,
        source=source,
        since=last_updated,
        pending_bugs=pending,
        please_stop=please_stop
    )
    diff_thread = Thread.run(
        "diff",
        diff,
        source,
        destination,
        pending,
        please_stop=please_stop
    )
    replication_thread = Thread.run(
        "replication",
        replicate,
        source,
        destination,
        pending,
        config.fix,
        please_stop=please_stop
    )
    pending_thread.join()
    diff_thread.join()
    pending.add(Thread.STOP)
    replication_thread.join()
    done.go()
    please_stop.go()
Developer: klahnakoski, Project: esReplicate, Lines: 34, Source file: replicate.py
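The example shows the library's core fan-out pattern: Thread.run() spawns a named thread, a shared Queue carries work between stages, join() waits in dependency order, and Thread.STOP on the queue tells the consumer no more work is coming. A minimal sketch of the same pattern, assuming pyLibrary is installed and its Queue/Signal/Thread behave as used above (the producer and consumer functions are hypothetical):

from pyLibrary.thread.threads import Queue, Signal, Thread

def producer(please_stop, pending):
    # HYPOTHETICAL PRODUCER: PUSH WORK UNTIL ASKED TO STOP
    for i in range(10):
        if please_stop:
            break
        pending.add(i)
    pending.add(Thread.STOP)  # SENTINEL: NO MORE WORK IS COMING

def consumer(please_stop, pending):
    # HYPOTHETICAL CONSUMER: DRAIN UNTIL THE SENTINEL ARRIVES
    while not please_stop:
        item = pending.pop()
        if item is Thread.STOP:
            break

please_stop = Signal()
pending = Queue("work", max=100)
p = Thread.run("producer", producer, pending=pending, please_stop=please_stop)
c = Thread.run("consumer", consumer, pending=pending, please_stop=please_stop)
p.join()
c.join()
please_stop.go()

In the real example the sentinel is pushed only after the upstream threads are joined, which guarantees no work can arrive after Thread.STOP.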
Example 2: TextLog_usingThread
class TextLog_usingThread(TextLog):

    def __init__(self, logger):
        if not _Log:
            _delayed_imports()

        self.queue = _Queue("logs", max=10000, silent=True)
        self.logger = logger

        def worker(please_stop):
            while not please_stop:
                Thread.sleep(1)
                logs = self.queue.pop_all()
                for log in logs:
                    if log is Thread.STOP:
                        if DEBUG_LOGGING:
                            sys.stdout.write("TextLog_usingThread.worker() sees stop, filling rest of queue\n")
                        please_stop.go()
                    else:
                        self.logger.write(**log)

        self.thread = Thread("log thread", worker)
        self.thread.parent.remove_child(self.thread)  # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
        self.thread.start()

    def write(self, template, params):
        try:
            self.queue.add({"template": template, "params": params})
            return self
        except Exception, e:
            e = _Except.wrap(e)
            sys.stdout.write("IF YOU SEE THIS, IT IS LIKELY YOU FORGOT TO RUN Log.start() FIRST\n")
            raise e  # OH NO!
Developer: klahnakoski, Project: MoDataSubmission, Lines: 33, Source file: text_logs.py
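Shutdown of this logger is driven from outside: the owner pushes Thread.STOP onto the queue and joins the worker. A hedged usage sketch; ConsoleWriter is a hypothetical stand-in for the structured logger (anything with a write(template, params) method will do):

import sys

class ConsoleWriter(object):
    # HYPOTHETICAL SINK WITH THE write(**kwargs) SHAPE THE WORKER EXPECTS
    def write(self, template, params):
        sys.stdout.write(template + " " + repr(params) + "\n")

log = TextLog_usingThread(ConsoleWriter())
log.write("hello {{name}}", {"name": "world"})

# SHUTDOWN: PUSH THE SENTINEL, THEN WAIT FOR THE WORKER TO DRAIN AND EXIT
log.queue.add(Thread.STOP)
log.thread.join()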
Example 3: _worker
def _worker(self, please_stop):
    curr = "0.0"
    acc = []
    last_count_written = -1
    next_write = Date.now()

    while not please_stop:
        d = self.temp_queue.pop(timeout=MINUTE)
        if d == None:
            if not acc:
                continue

            # WRITE THE INCOMPLETE DATA TO S3, BUT NOT TOO OFTEN
            next_write = Date.now() + MINUTE
            try:
                if last_count_written != len(acc):
                    if DEBUG:
                        Log.note("write incomplete data ({{num}} lines) to {{uid}} in S3 next (time = {{next_write}})", uid=curr, next_write=next_write, num=len(acc))
                    self.bucket.write_lines(curr, (convert.value2json(a) for a in acc))
                    last_count_written = len(acc)
            except Exception, e:
                Log.note("Problem with write to S3", cause=e)
        elif d[UID_PATH] != curr:
            # WRITE acc TO S3 IF WE ARE MOVING TO A NEW KEY
            try:
                if acc:
                    if DEBUG:
                        Log.note("write complete data ({{num}} lines) to {{curr}} in S3", num=len(acc), curr=curr)
                    self.bucket.write_lines(curr, (convert.value2json(a) for a in acc))
                    last_count_written = 0
                curr = d[UID_PATH]
                acc = [d]
            except Exception, e:
                Log.warning("Can not store data", cause=e)
                Thread.sleep(30*MINUTE)
Developer: klahnakoski, Project: MoDataSubmission, Lines: 34, Source file: storage.py
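The worker batches records by UID: a queue timeout flushes an incomplete batch to S3, and a change of key flushes the finished batch before starting the next. The snippet is truncated before the same-key branch, so the sketch below fills in the usual shape of that state machine as an assumption, in plain Python with illustrative names:

import sys

def flush_batches(records, write_batch):
    # GROUP CONSECUTIVE (key, record) PAIRS; WRITE EACH GROUP WHEN THE KEY CHANGES
    curr, acc = None, []
    for key, record in records:
        if key != curr:
            if acc:
                write_batch(curr, acc)  # KEY CHANGED: FLUSH THE COMPLETED BATCH
            curr, acc = key, [record]
        else:
            acc.append(record)  # THE BRANCH ELIDED FROM THE SNIPPET ABOVE
    if acc:
        write_batch(curr, acc)  # FLUSH THE FINAL, POSSIBLY PARTIAL, BATCH

flush_batches(
    [("a", 1), ("a", 2), ("b", 3)],
    lambda key, batch: sys.stdout.write(key + ": " + repr(batch) + "\n")
)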
Example 4: main
def main():
    try:
        settings = startup.read_settings(defs=[{
            "name": ["--id"],
            "help": "id(s) to process. Use \"..\" for a range.",
            "type": str,
            "dest": "id",
            "required": False
        }])
        constants.set(settings.constants)
        Log.start(settings.debug)

        if settings.args.id:
            etl_one(settings)
            return

        hg = HgMozillaOrg(settings=settings.hg)
        resources = Dict(hg=dictwrap(hg))

        stopper = Signal()
        for i in range(coalesce(settings.param.threads, 1)):
            ETL(
                name="ETL Loop " + unicode(i),
                work_queue=settings.work_queue,
                resources=resources,
                workers=settings.workers,
                settings=settings.param,
                please_stop=stopper
            )

        Thread.wait_for_shutdown_signal(stopper, allow_exit=True)
    except Exception, e:
        Log.error("Problem with etl", e)
Developer: klahnakoski, Project: Activedata-ETL, Lines: 33, Source file: etl.py
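Thread.wait_for_shutdown_signal() parks the main thread until Ctrl-C/SIGTERM (or until the Signal is tripped elsewhere), so every worker holding the same please_stop can wind down cleanly. A minimal sketch under the same API assumptions as above:

from pyLibrary.thread.threads import Signal, Thread

def heartbeat(please_stop):
    # LOOP UNTIL THE MAIN THREAD TRIPS THE SIGNAL
    while not please_stop:
        Thread.sleep(seconds=1, please_stop=please_stop)

stopper = Signal()
Thread.run("heartbeat", heartbeat, please_stop=stopper)

# BLOCKS UNTIL Ctrl-C (OR stopper.go() FROM ELSEWHERE), THEN EXITS
Thread.wait_for_shutdown_signal(stopper, allow_exit=True)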
Example 5: Log_usingThread
class Log_usingThread(BaseLog):

    def __init__(self, logger):
        # DELAYED LOAD FOR THREADS MODULE
        from pyLibrary.thread.threads import Queue

        self.queue = Queue("logs", max=10000, silent=True)
        self.logger = logger

        def worker(please_stop):
            while not please_stop:
                Thread.sleep(1)
                logs = self.queue.pop_all()
                for log in logs:
                    if log is Thread.STOP:
                        if DEBUG_LOGGING:
                            sys.stdout.write("Log_usingThread.worker() sees stop, filling rest of queue\n")
                        please_stop.go()
                    else:
                        self.logger.write(**log)

        self.thread = Thread("log thread", worker)
        self.thread.start()

    def write(self, template, params):
        try:
            self.queue.add({"template": template, "params": params})
            return self
        except Exception, e:
            sys.stdout.write("IF YOU SEE THIS, IT IS LIKELY YOU FORGOT TO RUN Log.start() FIRST\n")
            raise e  # OH NO!
Developer: klahnakoski, Project: intermittents, Lines: 31, Source file: logs.py
Example 6: __init__
def __init__(
    self,
    exchange,  # name of the Pulse exchange
    topic,  # message name pattern to subscribe to ('#' is wildcard)
    target=None,  # WILL BE CALLED WITH PULSE PAYLOADS AND ack() IF COMPLETED WITHOUT EXCEPTION
    target_queue=None,  # (aka self.queue) WILL BE FILLED WITH PULSE PAYLOADS
    host='pulse.mozilla.org',  # url to connect
    port=5671,  # tcp port
    user=None,
    password=None,
    vhost="/",
    start=0,  # USED AS STARTING POINT FOR ASSIGNING THE _meta.count ATTRIBUTE
    ssl=True,
    applabel=None,
    heartbeat=False,  # True to also get the Pulse heartbeat message
    durable=False,  # True to keep queue after shutdown
    serializer='json',
    broker_timezone='GMT',
    settings=None
):
    self.target_queue = target_queue
    self.pulse_target = target
    if (target_queue == None and target == None) or (target_queue != None and target != None):
        Log.error("Expecting a queue (for fast digesters) or a target (for slow digesters)")

    Thread.__init__(self, name="Pulse consumer for " + settings.exchange, target=self._worker)
    self.settings = settings
    settings.callback = self._got_result
    settings.user = coalesce(settings.user, settings.username)
    settings.applabel = coalesce(settings.applabel, settings.queue, settings.queue_name)
    settings.topic = topic

    self.pulse = ModifiedGenericConsumer(settings, connect=True, **settings)
    self.count = coalesce(start, 0)
    self.start()
Developer: klahnakoski, Project: MoDevETL, Lines: 35, Source file: pulse.py
Example 7: __init__
def __init__(self, host, index, alias=None, name=None, port=9200, settings=None):
    global _elasticsearch
    if hasattr(self, "settings"):
        return

    from pyLibrary.queries.containers.lists import ListContainer
    from pyLibrary.env import elasticsearch as _elasticsearch

    self.settings = settings
    self.default_name = coalesce(name, alias, index)
    self.default_es = _elasticsearch.Cluster(settings=settings)
    self.todo = Queue("refresh metadata", max=100000, unique=True)

    self.meta = Dict()
    table_columns = metadata_tables()
    column_columns = metadata_columns()
    self.meta.tables = ListContainer("meta.tables", [], wrap({c.name: c for c in table_columns}))
    self.meta.columns = ListContainer("meta.columns", [], wrap({c.name: c for c in column_columns}))
    self.meta.columns.insert(column_columns)
    self.meta.columns.insert(table_columns)

    # TODO: fix monitor so it does not bring down ES
    if ENABLE_META_SCAN:
        self.worker = Thread.run("refresh metadata", self.monitor)
    else:
        self.worker = Thread.run("refresh metadata", self.not_monitor)
    return
Developer: klahnakoski, Project: esReplicate, Lines: 26, Source file: meta.py
Example 8: main
def main():
    try:
        settings = startup.read_settings()
        Log.start(settings.debug)
        constants.set(settings.constants)

        with startup.SingleInstance(flavor_id=settings.args.filename):
            with aws.s3.Bucket(settings.destination) as bucket:

                if settings.param.debug:
                    if settings.source.durable:
                        Log.error("Can not run in debug mode with a durable queue")
                    synch = SynchState(bucket.get_key(SYNCHRONIZATION_KEY, must_exist=False))
                else:
                    synch = SynchState(bucket.get_key(SYNCHRONIZATION_KEY, must_exist=False))
                    if settings.source.durable:
                        synch.startup()

                queue = PersistentQueue(settings.param.queue_file)
                if queue:
                    last_item = queue[len(queue) - 1]
                    synch.source_key = last_item._meta.count + 1

                with pulse.Consumer(settings=settings.source, target=None, target_queue=queue, start=synch.source_key):
                    Thread.run("pulse log loop", log_loop, settings, synch, queue, bucket)
                    Thread.wait_for_shutdown_signal(allow_exit=True)
                    Log.warning("starting shutdown")

                queue.close()
                Log.note("write shutdown state to S3")
                synch.shutdown()
    except Exception, e:
        Log.error("Problem with etl", e)
Developer: klahnakoski, Project: Activedata-ETL, Lines: 34, Source file: pulse_logger.py
Example 9: _insert_loop
def _insert_loop(self, please_stop=None):
    bad_count = 0
    while not please_stop:
        try:
            Thread.sleep(seconds=1)
            messages = wrap(self.queue.pop_all())
            if not messages:
                continue

            for g, mm in jx.groupby(messages, size=self.batch_size):
                scrubbed = []
                try:
                    for i, message in enumerate(mm):
                        if message is Thread.STOP:
                            please_stop.go()
                            return
                        scrubbed.append(_deep_json_to_string(message, depth=3))
                finally:
                    self.es.extend(scrubbed)
            bad_count = 0
        except Exception, e:
            Log.warning("Problem inserting logs into ES", cause=e)
            bad_count += 1
            if bad_count > MAX_BAD_COUNT:
                Log.warning("Given up trying to write debug logs to ES index {{index}}", index=self.es.settings.index)
            Thread.sleep(seconds=30)
Developer: klahnakoski, Project: TestFailures, Lines: 26, Source file: log_usingElasticSearch.py
Example 10: Log_usingLogger
class Log_usingLogger(BaseLog):

    def __init__(self, settings):
        self.logger = logging.Logger("unique name", level=logging.INFO)
        self.logger.addHandler(make_log_from_settings(settings))

        # TURNS OUT LOGGERS ARE REALLY SLOW TOO
        self.queue = threads.Queue("log to classic logger", max=10000, silent=True)
        self.thread = Thread("log to logger", time_delta_pusher, appender=self.logger.info, queue=self.queue, interval=timedelta(seconds=0.3))
        self.thread.start()

    def write(self, template, params):
        # http://docs.python.org/2/library/logging.html#logging.LogRecord
        self.queue.add({"template": template, "params": params})

    def stop(self):
        try:
            if DEBUG_LOGGING:
                sys.stdout.write("Log_usingLogger sees stop, adding stop to queue\n")
            self.queue.add(Thread.STOP)  # BE PATIENT, LET REST OF MESSAGE BE SENT
            self.thread.join()
            if DEBUG_LOGGING:
                sys.stdout.write("Log_usingLogger done\n")
        except Exception, e:
            pass

        try:
            self.queue.close()
        except Exception, f:
            pass
Developer: klahnakoski, Project: intermittents, Lines: 29, Source file: log_usingLogger.py
Example 11: get_columns
def get_columns(self, table_name, column_name=None, force=False):
    """
    RETURN METADATA COLUMNS
    """
    try:
        # LAST TIME WE GOT INFO FOR THIS TABLE
        short_name = join_field(split_field(table_name)[0:1])
        table = self.get_table(short_name)[0]

        if not table:
            table = Table(
                name=short_name,
                url=None,
                query_path=None,
                timestamp=Date.now()
            )
            with self.meta.tables.locker:
                self.meta.tables.add(table)
            self._get_columns(table=short_name)
        elif force or table.timestamp == None or table.timestamp < Date.now() - MAX_COLUMN_METADATA_AGE:
            table.timestamp = Date.now()
            self._get_columns(table=short_name)

        with self.meta.columns.locker:
            columns = self.meta.columns.find(table_name, column_name)
        if columns:
            columns = jx.sort(columns, "name")
            # AT LEAST WAIT FOR THE COLUMNS TO UPDATE
            while len(self.todo) and not all(columns.get("last_updated")):
                Log.note("waiting for columns to update {{columns|json}}", columns=[c.table + "." + c.es_column for c in columns if not c.last_updated])
                Thread.sleep(seconds=1)
            return columns
    except Exception, e:
        Log.error("Not expected", cause=e)
Developer: klahnakoski, Project: TestFailures, Lines: 34, Source file: meta.py
Example 12: __init__
def __init__(self, name, in_queue, out_queue, function):
    Thread.__init__(self, name, self.event_loop)
    self.in_queue = in_queue
    self.out_queue = out_queue
    self.function = function
    self.num_runs = 0
    self.start()
Developer: klahnakoski, Project: MoDataSubmission, Lines: 7, Source file: multithread.py
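This is the subclassing variant: Thread.__init__() is given a name and a bound method as target, and the constructor ends with self.start(). A hedged sketch of a complete worker built this way, assuming the target receives please_stop like any other pyLibrary thread (Doubler and its queues are illustrative, not from the source project):

from pyLibrary.thread.threads import Queue, Thread

class Doubler(Thread):
    def __init__(self, in_queue, out_queue):
        Thread.__init__(self, "doubler", self.event_loop)
        self.in_queue = in_queue
        self.out_queue = out_queue
        self.start()

    def event_loop(self, please_stop):
        # ASSUMPTION: pyLibrary PASSES please_stop TO THE TARGET, AS IN THE EXAMPLES ABOVE
        while not please_stop:
            value = self.in_queue.pop()
            if value is Thread.STOP:
                break
            self.out_queue.add(value * 2)

source = Queue("in")
sink = Queue("out")
worker = Doubler(source, sink)
source.add(21)
source.add(Thread.STOP)
worker.join()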
Example 13: wait_for_queue
def wait_for_queue(work_queue):
    """
    SLEEP UNTIL WORK QUEUE IS EMPTY ENOUGH FOR MORE
    """
    # return
    while True:
        if len(work_queue) < MAX_QUEUE_SIZE:
            break
        Log.note("sleep for 5min")
        Thread.sleep(seconds=5 * 60)
Developer: klahnakoski, Project: Activedata-ETL, Lines: 10, Source file: backfill.py
Example 14: worker
def worker(please_stop):
    while not please_stop:
        try:
            response = requests.get("http://169.254.169.254/latest/meta-data/spot/termination-time")
            if response.status_code != 400:
                please_stop.go()
                return
        except Exception, e:
            Thread.sleep(seconds=61, please_stop=please_stop)
        Thread.sleep(seconds=11, please_stop=please_stop)
Developer: klahnakoski, Project: esReplicate, Lines: 10, Source file: __init__.py
Example 15: create_index
def create_index(
    self,
    index,
    alias=None,
    create_timestamp=None,
    schema=None,
    limit_replicas=None,
    read_only=False,
    tjson=False,
    settings=None
):
    if not alias:
        alias = settings.alias = settings.index
        index = settings.index = proto_name(alias, create_timestamp)

    if settings.alias == index:
        Log.error("Expecting index name to conform to pattern")

    if settings.schema_file:
        Log.error('schema_file attribute not supported. Use {"$ref":<filename>} instead')

    if schema == None:
        Log.error("Expecting a schema")
    elif isinstance(schema, basestring):
        schema = convert.json2value(schema, leaves=True)
    else:
        schema = convert.json2value(convert.value2json(schema), leaves=True)

    if limit_replicas:
        # DO NOT ASK FOR TOO MANY REPLICAS
        health = self.get("/_cluster/health")
        if schema.settings.index.number_of_replicas >= health.number_of_nodes:
            Log.warning("Reduced number of replicas: {{from}} requested, {{to}} realized",
                {"from": schema.settings.index.number_of_replicas},
                to=health.number_of_nodes - 1
            )
            schema.settings.index.number_of_replicas = health.number_of_nodes - 1

    self.post(
        "/" + index,
        data=schema,
        headers={"Content-Type": "application/json"}
    )

    # CONFIRM INDEX EXISTS
    while True:
        try:
            state = self.get("/_cluster/state", retry={"times": 5}, timeout=3)
            if index in state.metadata.indices:
                break
            Log.note("Waiting for index {{index}} to appear", index=index)
        except Exception, e:
            Log.warning("Problem while waiting for index {{index}} to appear", index=index, cause=e)
        Thread.sleep(seconds=1)
Developer: klahnakoski, Project: TestFailures, Lines: 54, Source file: elasticsearch.py
Example 16: worker
def worker(please_stop):
    while not please_stop:
        Thread.sleep(1)
        logs = self.queue.pop_all()
        for log in logs:
            if log is Thread.STOP:
                if DEBUG_LOGGING:
                    sys.stdout.write("TextLog_usingThread.worker() sees stop, filling rest of queue\n")
                please_stop.go()
            else:
                self.logger.write(**log)
Developer: klahnakoski, Project: MoDataSubmission, Lines: 11, Source file: text_logs.py
Example 17: inners
def inners():
    for t in data.hits.hits:
        for i in t.inner_hits[literal_field(query_path)].hits.hits:
            t._inner = i._source
            for k, e in post_expressions.items():
                t[k] = e(t)
            yield t
    if more_filter:
        Thread.join(need_more)
        for t in more[0].hits.hits:
            yield t
Developer: klahnakoski, Project: MoDataSubmission, Lines: 11, Source file: deep.py
Example 18: _rate_limited_get_json
def _rate_limited_get_json(self, *args, **kwargs):
    now = Date.now().unix
    with self.rate_locker:
        if self.request_times[self.request_pointer] >= now - 1:
            Log.note("Rate limiting")
            Thread.sleep(seconds=self.request_times[self.request_pointer] - now + 1)
        self.request_times[self.request_pointer] = now
        self.request_pointer += 1
        self.request_pointer %= len(self.request_times)
    return http.get_json(*args, **kwargs)
Developer: klahnakoski, Project: MoTreeherder, Lines: 11, Source file: treeherder.py
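The limiter keeps a ring of the last N request timestamps; if the slot about to be overwritten is less than one second old, N requests already happened inside the window, so the caller sleeps until the oldest ages out. The same idea as a self-contained sketch in plain Python (names are illustrative, not pyLibrary's):

import time
from threading import Lock

class RateLimiter(object):
    """ALLOW AT MOST max_per_second CALLS IN ANY ONE-SECOND WINDOW"""
    def __init__(self, max_per_second):
        self.lock = Lock()
        self.times = [0.0] * max_per_second  # RING BUFFER OF RECENT CALL TIMES
        self.pointer = 0                     # OLDEST SLOT, NEXT TO BE OVERWRITTEN

    def wait(self):
        with self.lock:
            now = time.time()
            oldest = self.times[self.pointer]
            if oldest >= now - 1:
                # N-th PREVIOUS CALL WAS UNDER A SECOND AGO: PAUSE UNTIL IT AGES OUT
                time.sleep(oldest - now + 1)
            self.times[self.pointer] = time.time()
            self.pointer = (self.pointer + 1) % len(self.times)

limiter = RateLimiter(max_per_second=5)
for _ in range(20):
    limiter.wait()  # EACH CALL IS ADMITTED AT NO MORE THAN 5/SECOND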
Example 19: _pinger
def _pinger(self, please_stop):
    Log.note("pinger started")
    while not please_stop:
        Thread.sleep(till=self.ping_time + PING_PERIOD, please_stop=please_stop)
        if please_stop:  # EXIT EARLY, OTHERWISE WE MAY OVERWRITE THE shutdown
            break
        if Date.now() < self.ping_time + PING_PERIOD:
            continue
        try:
            self.ping()
        except Exception, e:
            Log.warning("synchro.py could not ping", e)
Developer: klahnakoski, Project: Activedata-ETL, Lines: 12, Source file: synchro.py
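Thread.sleep(till=...) takes an absolute deadline rather than a duration, so the ping period does not drift when the loop body takes time. A sketch of that periodic-task shape, assuming pyLibrary's Date and MINUTE live at pyLibrary.times.dates and pyLibrary.times.durations (an assumption; the rest follows the API used above):

from pyLibrary.thread.threads import Signal, Thread
from pyLibrary.times.dates import Date          # ASSUMED MODULE PATH
from pyLibrary.times.durations import MINUTE    # ASSUMED MODULE PATH

def pinger(please_stop):
    next_ping = Date.now()
    while not please_stop:
        next_ping = next_ping + MINUTE
        Thread.sleep(till=next_ping, please_stop=please_stop)
        if please_stop:
            break  # STOPPED MID-SLEEP: DO NOT RUN THE BODY AGAIN
        # ... PERIODIC WORK GOES HERE ...

stopper = Signal()
Thread.run("pinger", pinger, please_stop=stopper)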
Example 20: _get_and_retry
def _get_and_retry(self, url, **kwargs):
    """
    requests 2.5.0 HTTPS IS A LITTLE UNSTABLE
    """
    kwargs = set_default(kwargs, {"timeout": self.timeout.seconds})
    try:
        return http.get(url, **kwargs)
    except Exception, e:
        try:
            Thread.sleep(seconds=5)
            return http.get(url.replace("https://", "http://"), **kwargs)
        except Exception, f:
            Log.error("Tried {{url}} twice. Both failed.", {"url": url}, cause=[e, f])
Developer: klahnakoski, Project: Activedata-ETL, Lines: 13, Source file: hg_mozilla_org.py
Note: The pyLibrary.thread.threads.Thread examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors. Consult each project's License before redistributing or using the code; do not repost without permission.