This article collects and summarizes typical usage examples of the Python function mapreduce.util.for_name. If you have been wondering what exactly for_name does, how to call it, or what real-world usage looks like, the hand-picked examples below should help.
20 code examples of for_name are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code samples.
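Before diving in, a minimal sketch of what for_name does may help: it imports the module portion of a fully qualified dotted name and returns the attribute the final component names (a function, class, or other module-level object). The snippet below is a hypothetical illustration written for this article, not taken from any of the projects cited:

from mapreduce import util

# Resolve a fully qualified name to the function object it names.
join_fn = util.for_name("os.path.join")
assert join_fn("a", "b") == "a/b"  # on POSIX systems

# Classes resolve the same way; instantiation is left to the caller.
ordered_dict_cls = util.for_name("collections.OrderedDict")
counts = ordered_dict_cls()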
Example 1: testGlobalName
def testGlobalName(self):
    """Tests when the name has no dots in it."""
    try:
        util.for_name("this_is_a_bad_module_name")
    except ImportError, e:
        self.assertTrue(str(e).startswith(
            "Could not find 'this_is_a_bad_module_name' on path "))
Developer: Hao-Hsuan, Project: KhanLatest, Lines: 7, Source: util_test.py
Example 2: run
def run(self, job_name, sequence_num, namespace, output, complete_fn,
        mapreduce_pipeline_args):
    results = []
    try:
        iterator = input_readers.GoogleCloudStorageInputReader(output, 0)
        for file_reader in iterator:
            for item in file_reader:
                # Map/reduce puts reducer output into blobstore files as a
                # string obtained via "str(result)". Use AST as a safe
                # alternative to eval() to get the Python object back.
                results.append(ast.literal_eval(item))
        if complete_fn:
            util.for_name(complete_fn)(mapreduce_pipeline_args, results)
        with Namespace(namespace):
            db.run_in_transaction(
                DurableJobEntity._complete_job, job_name, sequence_num,
                MapReduceJob.build_output(self.root_pipeline_id, results))
    # Don't know what exceptions are currently, or will be in future,
    # thrown from Map/Reduce or Pipeline libraries; these are under
    # active development.
    #
    # pylint: disable=broad-except
    except Exception, ex:
        logging.critical('Failed running map/reduce job %s: %s', job_name,
                         str(ex))
        common_utils.log_exception_origin()
        time_completed = time.time()
        with Namespace(namespace):
            db.run_in_transaction(
                DurableJobEntity._fail_job, job_name, sequence_num,
                MapReduceJob.build_output(self.root_pipeline_id, results,
                                          str(ex)))
Developer: google, Project: coursebuilder-core, Lines: 33, Source: jobs.py
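For context, complete_fn above arrives as a fully qualified dotted name, is resolved with util.for_name, and is then called with the pipeline arguments and the collected results. A hypothetical callback matching that call site (the name announce_completion is invented for illustration) could look like:

import logging

def announce_completion(mapreduce_pipeline_args, results):
    # Signature inferred from the call site above:
    # util.for_name(complete_fn)(mapreduce_pipeline_args, results)
    logging.info('Map/reduce job finished with %d result(s); args=%s',
                 len(results), mapreduce_pipeline_args)

It would be registered under its dotted path, e.g. 'myapp.jobs.announce_completion' (a hypothetical module path).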
Example 3: testBadClass
def testBadClass(self):
    """Tests when the class is found but the function name is missing."""
    try:
        util.for_name("__main__.TestHandlerWithArgs.missing")
    except ImportError, e:
        self.assertEquals(
            "Could not find 'missing' on path '__main__.TestHandlerWithArgs'",
            str(e))
Developer: Hao-Hsuan, Project: KhanLatest, Lines: 8, Source: util_test.py
Example 4: testBadModule
def testBadModule(self):
    """Tests when the module name is bogus."""
    try:
        util.for_name("this_is_a_bad_module_name.stuff")
    except ImportError, e:
        self.assertEquals(
            "Could not find 'stuff' on path 'this_is_a_bad_module_name'",
            str(e))
Developer: Hao-Hsuan, Project: KhanLatest, Lines: 8, Source: util_test.py
Example 5: testBadFunction
def testBadFunction(self):
    """Tests when the module name is good but the function is missing."""
    try:
        util.for_name("__main__.does_not_exist")
    except ImportError, e:
        self.assertEquals(
            "Could not find 'does_not_exist' on path '__main__'",
            str(e))
Developer: Hao-Hsuan, Project: KhanLatest, Lines: 8, Source: util_test.py
Example 6: validate
def validate(cls, mapper_spec):
    super(DjangoModelInputReader, cls).validate(mapper_spec)

    params = _get_params(mapper_spec)

    if cls.NAMESPACE_PARAM in params:
        raise BadReaderParamsError("Namespaces are not supported.")

    entity_kind_name = params[cls.ENTITY_KIND_PARAM]
    try:
        util.for_name(entity_kind_name)
    except ImportError, e:
        raise BadReaderParamsError("Bad entity kind: %s" % e)
Developer: 3quarterstack, Project: simple_blog, Lines: 13, Source: input_readers.py
Example 7: _to_map_job_config
def _to_map_job_config(cls,
                       mr_spec,
                       # TODO(user): Remove this parameter after it can be
                       # read from mr_spec.
                       queue_name):
    """Converts model.MapreduceSpec back to JobConfig.

    This method allows our internal methods to use JobConfig directly.
    This method also allows us to expose JobConfig as an API during execution,
    despite that it is not saved into datastore.

    Args:
      mr_spec: model.MapreduceSpec.
      queue_name: queue name.

    Returns:
      The JobConfig object for this job.
    """
    mapper_spec = mr_spec.mapper
    # 0 means all the old APIs before api_version is introduced.
    api_version = mr_spec.params.get("api_version", 0)
    old_api = api_version == 0

    # We can not always convert MapreduceSpec generated by older API
    # to JobConfig. Thus, mr framework should use/expose the returned JobConfig
    # object with caution when a job is started with an old API.
    # In this case, this method only tries not to blow up and assemble a
    # JobConfig object as accurate as possible.
    return cls(_lenient=old_api,
               job_name=mr_spec.name,
               job_id=mr_spec.mapreduce_id,
               # handler_spec from older API may not have map_job.Mapper type.
               mapper=util.for_name(mapper_spec.handler_spec),
               input_reader_cls=mapper_spec.input_reader_class(),
               input_reader_params=input_readers._get_params(mapper_spec),
               output_writer_cls=mapper_spec.output_writer_class(),
               output_writer_params=output_writers._get_params(mapper_spec),
               shard_count=mapper_spec.shard_count,
               queue_name=queue_name,
               user_params=mr_spec.params.get("user_params"),
               shard_max_attempts=mr_spec.params.get("shard_max_attempts"),
               done_callback_url=mr_spec.params.get("done_callback"),
               _force_writes=mr_spec.params.get("force_writes"),
               _base_path=mr_spec.params["base_path"],
               _task_max_attempts=mr_spec.params.get("task_max_attempts"),
               _task_max_data_processing_attempts=(
                   mr_spec.params.get("task_max_data_processing_attempts")),
               _hooks_cls=util.for_name(mr_spec.hooks_class_name),
               _app=mr_spec.params.get("app_id"),
               _api_version=api_version)
Developer: Honglongwu, Project: genomics-tools, Lines: 49, Source: map_job_config.py
Example 8: __iter__
def __iter__(self):
    """Create a generator for entities or keys in the range.

    Iterating through entries moves query range past the consumed entries.

    Yields:
      next entry.
    """
    while True:
        entries_query = self._key_range.make_ascending_query(
            util.for_name(self._entity_kind), self._keys_only)
        entries_list = entries_query.fetch(limit=self.batch_size)

        if not entries_list:
            return

        for entry in entries_list:
            if hasattr(entry, 'key'):
                key = entry.key()
            else:
                key = entry

            self._key_range = key_range.KeyRange(key,
                                                 self._key_range.key_end,
                                                 self._key_range.direction,
                                                 False,
                                                 self._key_range.include_end)
            yield entry
Developer: anandu, Project: pylibs, Lines: 28, Source: input_readers.py
Example 9: __iter__
def __iter__(self):
    """Create a generator for model instances for entities.

    Iterating through entities moves query range past the consumed entities.

    Yields:
      next model instance.
    """
    while True:
        if self._current_key_range is None:
            break

        while True:
            query = self._current_key_range.make_ascending_query(
                util.for_name(self._entity_kind))
            results = query.fetch(limit=self._batch_size)

            if not results:
                self._advance_key_range()
                break

            for model_instance in results:
                key = model_instance.key()

                self._current_key_range.advance(key)
                yield model_instance
Developer: ChiHungLam, Project: dsm2-grid-map, Lines: 26, Source: input_readers.py
Example 10: output_writer_class
def output_writer_class(self):
    """Get output writer class.

    Returns:
      output writer class object.
    """
    return self.output_writer_spec and util.for_name(self.output_writer_spec)
Developer: elsigh, Project: browserscope, Lines: 7, Source: model.py
Example 11: input_reader_class
def input_reader_class(self):
    """Get input reader class.

    Returns:
      input reader class object.
    """
    return util.for_name(self.input_reader_spec)
Developer: elsigh, Project: browserscope, Lines: 7, Source: model.py
Example 12: handler_for_name
def handler_for_name(fq_name):
    """Resolves and instantiates handler by fully qualified name.

    NOTE: This is a clone of a function in the map/reduce module which has
    also been taught that map and reduce functions may be marked with
    @classmethod, as opposed to only member functions of default-constructable
    classes or @staticmethod. It is applied as a monkey-patch to fix the base
    library.

    First resolves the name using for_name call. Then if it resolves to a
    class, instantiates a class, if it resolves to a method - instantiates the
    class and binds method to the instance.

    Args:
      fq_name: fully qualified name of something to find.

    Returns:
      handler instance which is ready to be called.
    """
    resolved_name = mapreduce_util.for_name(fq_name)
    if isinstance(resolved_name, (type, types.ClassType)):
        # create new instance if this is type
        return resolved_name()
    elif (isinstance(resolved_name, types.MethodType) and
          resolved_name.im_self is None):
        # bind the method
        return getattr(resolved_name.im_class(), resolved_name.__name__)
    else:
        # Already bound -- classmethod or staticmethod.
        return resolved_name
Developer: google, Project: coursebuilder-core, Lines: 31, Source: mapreduce_module.py
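To make the three branches concrete, here is a hypothetical handler class (WordCountMapper and its method names are invented for illustration) together with what each kind of fully qualified name resolves to in Python 2:

class WordCountMapper(object):
    """Hypothetical handler used only to illustrate the branches above."""

    def process(self, entity):
        # Plain member function: for_name returns an unbound method
        # (im_self is None), so handler_for_name instantiates the class
        # and returns the method bound to that fresh instance.
        yield entity, 1

    @classmethod
    def process_cls(cls, entity):
        # classmethod: already bound to the class, so it is returned
        # as-is -- the case this monkey-patch adds support for.
        yield entity, 1

# "mymodule.WordCountMapper"             -> WordCountMapper() instance
# "mymodule.WordCountMapper.process"     -> process bound to a new instance
# "mymodule.WordCountMapper.process_cls" -> the classmethod, unchanged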
Example 13: _get_params
def _get_params(self, validator_parameter, name_prefix):
    """Retrieves additional user-supplied params for the job and validates them.

    Args:
      validator_parameter: name of the request parameter which supplies
        validator for this parameter set.
      name_prefix: common prefix for all parameter names in the request.

    Raises:
      Any exception raised by the 'params_validator' request parameter if
      the params fail to validate.
    """
    params_validator = self.request.get(validator_parameter)

    user_params = {}
    for key in self.request.arguments():
        if key.startswith(name_prefix):
            values = self.request.get_all(key)
            adjusted_key = key[len(name_prefix):]
            if len(values) == 1:
                user_params[adjusted_key] = values[0]
            else:
                user_params[adjusted_key] = values

    if params_validator:
        resolved_validator = util.for_name(params_validator)
        resolved_validator(user_params)

    return user_params
Developer: koush, Project: appengine-mapreduce, Lines: 29, Source: handlers.py
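As a standalone sketch of the parsing rule (the argument dict and parameter names below are hypothetical), the loop reduces to: parameters sharing the prefix are collected under their suffix, and single-element value lists are unwrapped.

def parse_prefixed_params(arguments, name_prefix):
    # `arguments` maps request parameter names to lists of values,
    # mirroring self.request.get_all() in the handler above.
    user_params = {}
    for key, values in arguments.items():
        if key.startswith(name_prefix):
            adjusted_key = key[len(name_prefix):]
            user_params[adjusted_key] = values[0] if len(values) == 1 else values
    return user_params

print parse_prefixed_params(
    {'mapper_params.entity_kind': ['models.Account'],
     'mapper_params.shard_files': ['a.csv', 'b.csv']},
    'mapper_params.')
# {'entity_kind': 'models.Account', 'shard_files': ['a.csv', 'b.csv']}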
Example 14: __iter__
def __iter__(self):
    k_range = self._key_range

    # Namespaces are not supported by djangoappengine
    if k_range.namespace:
        return

    model_class = util.for_name(self._query_spec.model_class_path)

    q = model_class.objects.all()

    if k_range.key_start:
        if k_range.include_start:
            q = q.filter(pk__gte=k_range.key_start.id_or_name())
        else:
            q = q.filter(pk__gt=k_range.key_start.id_or_name())

    if k_range.key_end:
        if k_range.include_end:
            q = q.filter(pk__lte=k_range.key_end.id_or_name())
        else:
            q = q.filter(pk__lt=k_range.key_end.id_or_name())

    q = q.order_by('pk')
    q = set_config(q, batch_size=self._query_spec.batch_size)

    if self._cursor:
        q = set_cursor(q, self._cursor)

    self._query = q

    for entity in self._query.iterator():
        yield entity
Developer: 3quarterstack, Project: simple_blog, Lines: 34, Source: input_readers.py
Example 15: run
def run(self,
        job_name,
        mapper_spec,
        shuffler_spec,
        reducer_spec,
        input_reader_spec,
        output_writer_spec=None,
        mapper_params=None,
        shuffler_params=None,
        reducer_params=None,
        shards=None,
        combiner_spec=None):
    map_pipeline = yield MapPipeline(job_name,
                                     mapper_spec,
                                     input_reader_spec,
                                     params=mapper_params,
                                     shards=shards)
    shuffler_pipeline = yield util.for_name(shuffler_spec)(
        job_name, shuffler_params, map_pipeline)
    reducer_pipeline = yield mapreduce_pipeline.ReducePipeline(
        job_name,
        reducer_spec,
        output_writer_spec,
        reducer_params,
        shuffler_pipeline,
        combiner_spec=combiner_spec)
    with pipeline.After(reducer_pipeline):
        all_temp_files = yield pipeline_common.Extend(
            map_pipeline, shuffler_pipeline)
        yield mapper_pipeline._CleanupPipeline(all_temp_files)
    yield pipeline_common.Return(reducer_pipeline)
Developer: cloudysunny14, Project: appengine-mapreduce2GCS, Lines: 30, Source: mapreduce_pipeline.py
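Note how every *_spec argument is a fully qualified dotted name; the shuffler in particular is resolved lazily with util.for_name, so callers can swap in their own shuffler pipeline. A hypothetical start-up call might look like this (MapShufflePipeline stands in for whatever class defines the run() above, all myapp.* paths are invented, and the stock shuffler class name is assumed to exist in this copy of the library):

pipe = MapShufflePipeline(
    'word_count',
    mapper_spec='myapp.jobs.word_count_map',
    shuffler_spec='mapreduce.shuffler.ShufflePipeline',  # assumed class name
    reducer_spec='myapp.jobs.word_count_reduce',
    input_reader_spec='mapreduce.input_readers.DatastoreInputReader',
    mapper_params={'entity_kind': 'myapp.models.Document'},
    shards=8)
pipe.start()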
Example 16: split_input
def split_input(cls, mapper_spec):
    """Splits query into shards without fetching query results.

    Tries as best as it can to split the whole query result set into equal
    shards. Due to difficulty of making the perfect split, resulting shards'
    sizes might differ significantly from each other. The actual number of
    shards might also be less than requested (even 1), though it is never
    greater.

    Current implementation does key-lexicographic order splitting. It requires
    query not to specify any __key__-based ordering. If an index for
    query.order('-__key__') query is not present, an inaccurate guess at
    sharding will be made by splitting the full key range.

    Args:
      mapper_spec: MapperSpec with params containing 'entity_kind'.
        May also have 'batch_size' in the params to specify the number
        of entities to process in each batch.

    Returns:
      A list of InputReader objects of length <= number_of_shards. These
      may be DatastoreInputReader or DatastoreKeyInputReader objects.

    Raises:
      BadReaderParamsError: required parameters are missing or invalid.
    """
    if mapper_spec.input_reader_class() != cls:
        raise BadReaderParamsError("Input reader class mismatch")

    params = mapper_spec.params
    if cls.ENTITY_KIND_PARAM not in params:
        raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")

    entity_kind_name = params[cls.ENTITY_KIND_PARAM]
    shard_count = mapper_spec.shard_count
    app = params.get(cls._APP_PARAM)
    # keys_only remains for backwards compatibility. It may go away.
    keys_only = util.parse_bool(params.get(cls.KEYS_ONLY_PARAM, False))

    if keys_only:
        raise BadReaderParamsError("The keys_only parameter is obsolete. "
                                   "Use DatastoreKeyInputReader instead.")

    # Fail fast if Model cannot be located.
    util.for_name(entity_kind_name)

    return cls._split_input_from_params(
        app, entity_kind_name, params, shard_count)
Developer: 2010gis, Project: v2ex, Lines: 47, Source: input_readers.py
Example 17: _get_raw_entity_kind
def _get_raw_entity_kind(cls, model_classpath):
    entity_type = util.for_name(model_classpath)
    if isinstance(entity_type, db.Model):
        return entity_type.kind()
    elif isinstance(entity_type, (ndb.Model, ndb.MetaModel)):
        # pylint: disable=protected-access
        return entity_type._get_kind()
    else:
        return util.get_short_name(model_classpath)
Developer: Alattack, Project: catapult, Lines: 9, Source: model_datastore_input_reader.py
Example 18: run
def run(self, job_id, job_class_str, kwargs):
    job_class = mapreduce_util.for_name(job_class_str)
    job_class.register_start(job_id, metadata={
        job_class._OUTPUT_KEY_ROOT_PIPELINE_ID: self.root_pipeline_id
    })

    # TODO(sll): Need try/except/mark-as-canceled here?
    output = yield mapreduce_pipeline.MapreducePipeline(**kwargs)
    yield StoreMapReduceResults(job_id, job_class_str, output)
Developer: VictoriaRoux, Project: oppia, Lines: 9, Source: jobs.py
Example 19: validate
def validate(cls, job_config):
    """Inherit docs."""
    super(ModelDatastoreInputReader, cls).validate(job_config)
    params = job_config.input_reader_params
    entity_kind = params[cls.ENTITY_KIND_PARAM]
    # Fail fast if Model cannot be located.
    try:
        model_class = util.for_name(entity_kind)
    except ImportError, e:
        raise errors.BadReaderParamsError("Bad entity kind: %s" % e)
Developer: Alattack, Project: catapult, Lines: 10, Source: model_datastore_input_reader.py
Example 20: split_input
def split_input(cls, mapper_spec):
    """Splits query into shards without fetching query results.

    Tries as best as it can to split the whole query result set into equal
    shards. Due to difficulty of making the perfect split, resulting shards'
    sizes might differ significantly from each other. The actual number of
    shards might also be less than requested (even 1), though it is never
    greater.

    Current implementation does key-lexicographic order splitting. It requires
    query not to specify any __key__-based ordering. If an index for
    query.order('-__key__') query is not present, an inaccurate guess at
    sharding will be made by splitting the full key range.

    Args:
      mapper_spec: MapperSpec with params containing 'entity_kind'.
        May also have 'batch_size' in the params to specify the number
        of entities to process in each batch.

    Returns:
      A list of DatastoreInputReader objects of length <= number_of_shards.

    Raises:
      BadReaderParamsError if required parameters are missing or invalid.
    """
    if mapper_spec.input_reader_class() != cls:
        raise BadReaderParamsError("Input reader class mismatch")

    params = mapper_spec.params
    if "entity_kind" not in params:
        raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")

    entity_kind_name = params["entity_kind"]
    entity_kind = util.for_name(entity_kind_name)
    shard_count = mapper_spec.shard_count
    batch_size = int(params.get("batch_size", cls._BATCH_SIZE))
    keys_only = int(params.get("keys_only", False))

    ds_query = entity_kind.all()._get_query()
    ds_query.Order("__key__")
    first_entity = ds_query.Get(1)
    if not first_entity:
        return []
    else:
        first_entity_key = first_entity[0].key()

    ds_query.Order(("__key__", datastore.Query.DESCENDING))
    try:
        last_entity = ds_query.Get(1)
        last_entity_key = last_entity[0].key()
    except db.NeedIndexError, e:
        logging.warning("Cannot create accurate approximation of keyspace, "
                        "guessing instead. Please address this problem: %s", e)
        last_entity_key = key_range.KeyRange.guess_end_key(
            entity_kind.kind(), first_entity_key)
Developer: anandu, Project: pylibs, Lines: 54, Source: input_readers.py
Note: The mapreduce.util.for_name function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are selected from open-source projects contributed by their developers; copyright belongs to the original authors, and distribution and use are subject to the corresponding project's license. Do not reproduce without permission.