
Python typeguard.check_argument_types Function Code Examples


This article collects typical usage examples of the Python function typeguard.check_argument_types. If you have been wondering what check_argument_types does, how to call it, or what real uses of it look like, the curated examples below should help.



The sections below show 20 code examples of the check_argument_types function, all drawn from the ufal/neuralmonkey project and ordered roughly by popularity.
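As a quick orientation before the examples: check_argument_types takes no arguments of its own; it inspects the calling frame and verifies every argument of the enclosing function against its type annotation, raising TypeError on any mismatch. A minimal sketch, assuming typeguard 2.x (the API generation these projects use; later major versions of typeguard changed or removed this function):

from typing import List

from typeguard import check_argument_types


def greet_all(names: List[str]) -> None:
    # Checks the caller's arguments against the annotations of the
    # enclosing function; raises TypeError on a mismatch.
    check_argument_types()
    for name in names:
        print("Hello, {}!".format(name))


greet_all(["Alice", "Bob"])  # passes
greet_all("Alice")           # raises TypeError: "names" must be a list

All of the examples below follow the same convention: the call is placed at the very top of the function body, so that mis-typed configuration values fail fast with a readable error.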

Example 1: __init__

    def __init__(self,
                 name: str,
                 encoders: List[TemporalStateful],
                 vocabulary: Vocabulary,
                 data_id: str,
                 max_output_len: int = None,
                 hidden_dim: int = None,
                 activation: Callable = tf.nn.relu,
                 dropout_keep_prob: float = 1.0,
                 add_start_symbol: bool = False,
                 add_end_symbol: bool = False,
                 reuse: ModelPart = None,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        check_argument_types()
        ModelPart.__init__(self, name, reuse, save_checkpoint, load_checkpoint,
                           initializers)

        self.encoders = encoders
        self.vocabulary = vocabulary
        self.data_id = data_id
        self.max_output_len = max_output_len
        self.hidden_dim = hidden_dim
        self.activation = activation
        self.dropout_keep_prob = dropout_keep_prob
        self.add_start_symbol = add_start_symbol
        self.add_end_symbol = add_end_symbol
Developer: ufal, Project: neuralmonkey, Lines: 28, Source: sequence_labeler.py


Example 2: beam_search_runner_range

def beam_search_runner_range(
        output_series: str,
        decoder: BeamSearchDecoder,
        max_rank: int = None,
        postprocess: Callable[[List[str]], List[str]] = None) -> List[
            BeamSearchRunner]:
    """Return beam search runners for a range of ranks from 1 to max_rank.

    This means there are max_rank output series, where the n-th series
    contains the n-th best hypothesis from the beam search.

    Args:
        output_series: Prefix of output series.
        decoder: Beam search decoder shared by all runners.
        max_rank: Maximum rank of the hypotheses.
        postprocess: Series-level postprocess applied on output.

    Returns:
        List of beam search runners getting hypotheses with rank from 1 to
        max_rank.
    """
    check_argument_types()

    if max_rank is None:
        max_rank = decoder.beam_size

    if max_rank > decoder.beam_size:
        raise ValueError(
            ("The maximum rank ({}) cannot be "
             "bigger than beam size {}.").format(
                 max_rank, decoder.beam_size))

    return [BeamSearchRunner("{}.rank{:03d}".format(output_series, r),
                             decoder, r, postprocess)
            for r in range(1, max_rank + 1)]
Developer: ufal, Project: neuralmonkey, Lines: 35, Source: beamsearch_runner.py
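Given the naming scheme in the return statement, a call such as the following (hypothetical; it assumes an already constructed BeamSearchDecoder instance) would produce five runners writing to the series target.rank001 through target.rank005:

# Hypothetical usage, assuming `decoder` is a BeamSearchDecoder.
runners = beam_search_runner_range("target", decoder, max_rank=5)
# Runner output series: target.rank001, target.rank002, ..., target.rank005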


Example 3: __init__

    def __init__(self,
                 name: str,
                 input_sequence: Attendable,
                 hidden_size: int,
                 num_heads: int,
                 output_size: int = None,
                 state_proj_size: int = None,
                 dropout_keep_prob: float = 1.0,
                 reuse: ModelPart = None,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        """Initialize an instance of the encoder."""
        check_argument_types()
        ModelPart.__init__(self, name, reuse, save_checkpoint, load_checkpoint,
                           initializers)

        self.input_sequence = input_sequence
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.output_size = output_size
        self.state_proj_size = state_proj_size
        self.dropout_keep_prob = dropout_keep_prob

        if self.dropout_keep_prob <= 0.0 or self.dropout_keep_prob > 1.0:
            raise ValueError("Dropout keep prob must be inside (0,1].")
Developer: ufal, Project: neuralmonkey, Lines: 26, Source: attentive.py


Example 4: single_tensor

def single_tensor(files: List[str]) -> np.ndarray:
    """Load a single tensor from a numpy file."""
    check_argument_types()
    if len(files) == 1:
        return np.load(files[0])

    return np.concatenate([np.load(f) for f in files], axis=0)
Developer: ufal, Project: neuralmonkey, Lines: 7, Source: numpy_reader.py


Example 5: __init__

    def __init__(self,
                 decoders: List[Any],
                 decoder_weights: List[ObjectiveWeight] = None,
                 l1_weight: float = 0.,
                 l2_weight: float = 0.,
                 clip_norm: float = None,
                 optimizer: tf.train.Optimizer = None,
                 var_scopes: List[str] = None,
                 var_collection: str = None) -> None:
        check_argument_types()

        if decoder_weights is None:
            decoder_weights = [None for _ in decoders]

        if len(decoder_weights) != len(decoders):
            raise ValueError(
                "decoder_weights (length {}) do not match decoders (length {})"
                .format(len(decoder_weights), len(decoders)))

        objectives = [CostObjective(dec, w)
                      for dec, w in zip(decoders, decoder_weights)]

        GenericTrainer.__init__(
            self,
            objectives=objectives,
            l1_weight=l1_weight,
            l2_weight=l2_weight,
            clip_norm=clip_norm,
            optimizer=optimizer,
            var_scopes=var_scopes,
            var_collection=var_collection)
Developer: ufal, Project: neuralmonkey, Lines: 31, Source: cross_entropy_trainer.py


Example 6: from_file_list

def from_file_list(prefix: str,
                   shape: List[int],
                   suffix: str = "",
                   default_tensor_name: str = "arr_0") -> Callable:
    """Load a list of numpy arrays from a list of .npz numpy files.

    Args:
        prefix: A common prefix for the files in the list.
        shape: The shape of the numpy arrays stored in the referenced files.
        suffix: An optional suffix that will be appended to each path.
        default_tensor_name: Key of the tensors to load from the npz files.

    Returns:
        A generator function that yields the loaded arrays.
    """
    check_argument_types()

    def load(files: List[str]) -> Iterable[np.ndarray]:
        for list_file in files:
            with open(list_file, encoding="utf-8") as f_list:
                for line in f_list:
                    path = os.path.join(prefix, line.rstrip()) + suffix
                    with np.load(path) as npz:
                        arr = npz[default_tensor_name]
                        arr_shape = list(arr.shape)
                        if arr_shape != shape:
                            raise ValueError(
                                "Shapes do not match: expected {}, found {}"
                                .format(shape, arr_shape))
                        yield arr
    return load
Developer: ufal, Project: neuralmonkey, Lines: 31, Source: numpy_reader.py
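A hypothetical usage of the from_file_list reader (the paths and shape below are illustrative, not from the neuralmonkey sources): the factory is configured once, and the returned load function is later called with a list of index files, each line of which names one .npz file under the prefix.

# Hypothetical usage; the paths and shape are illustrative only.
load = from_file_list(prefix="data/features",
                      shape=[10, 10, 2048],
                      suffix=".npz")
for array in load(["train_list.txt"]):
    print(array.shape)  # (10, 10, 2048)

Because check_argument_types runs when the factory is called, a mis-typed shape (e.g. a single int instead of a list of ints) is reported at configuration time, before any file is opened.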


Example 7: pooling

def pooling(
        prev_layer: tf.Tensor,
        prev_mask: tf.Tensor,
        specification: MaxPoolSpec,
        layer_num: int) -> Tuple[tf.Tensor, tf.Tensor]:
    try:
        check_argument_types()
    except TypeError as err:
        raise ValueError((
            "Specification of a pooling layer (number {} in config) "
            "needs to have 4 members: pool type, pool size, stride, "
            "padding, was {}").format(layer_num, specification)) from err
    pool_type, pool_size, stride, pad = specification

    if pool_type == "M":
        pool_fn = tf.layers.max_pooling2d
    elif pool_type == "A":
        pool_fn = tf.layers.average_pooling2d
    else:
        raise ValueError(
            ("Unsupported type of pooling: {}, use 'M' for max-pooling or "
             "'A' for average pooling.").format(pool_type))

    if pad not in ["same", "valid"]:
        raise ValueError(
            "Padding must be 'same' or 'valid', was '{}' in layer {}."
            .format(pad, layer_num + 1))

    with tf.variable_scope("layer_{}_max_pool".format(layer_num)):
        # Pass the validated padding mode through to the pooling ops.
        next_layer = pool_fn(prev_layer, pool_size, stride, padding=pad)
        next_mask = tf.layers.max_pooling2d(prev_mask, pool_size, stride,
                                            padding=pad)
    return next_layer, next_mask
Developer: ufal, Project: neuralmonkey, Lines: 32, Source: cnn_encoder.py
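Example 7 shows a useful variation on the basic pattern: the TypeError raised by check_argument_types is caught and re-raised as a domain-specific error, so the user sees a message phrased in terms of the configuration rather than of the type system. A standalone sketch of the same idea (parse_pool_spec and the PoolSpec alias are illustrative, modeled on the MaxPoolSpec tuple above):

from typing import Tuple

from typeguard import check_argument_types

# Illustrative 4-tuple spec: (pool type, pool size, stride, padding).
PoolSpec = Tuple[str, int, int, str]


def parse_pool_spec(spec: PoolSpec, layer_num: int) -> PoolSpec:
    # Convert typeguard's generic TypeError into a friendlier
    # configuration error.
    try:
        check_argument_types()
    except TypeError as err:
        raise ValueError(
            "Pooling spec for layer {} must be a 4-tuple "
            "(pool type, pool size, stride, padding), was {!r}"
            .format(layer_num, spec)) from err
    return spec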


Example 8: __init__

    def __init__(self,
                 output_series: str,
                 decoder: SupportedDecoder,
                 postprocess: Postprocessor = None) -> None:
        check_argument_types()
        BaseRunner[SupportedDecoder].__init__(self, output_series, decoder)
        self.postprocess = postprocess
Developer: ufal, Project: neuralmonkey, Lines: 7, Source: plain_runner.py


Example 9: mlp_output

def mlp_output(layer_sizes: List[int],
               activation: Callable[[tf.Tensor], tf.Tensor] = tf.tanh,
               dropout_keep_prob: float = 1.0) -> Tuple[OutputProjection, int]:
    """Apply a multilayer perceptron.

    Compute RNN deep output using the multilayer perceptron
    with a specified activation function.
    (Pascanu et al., 2013 [https://arxiv.org/pdf/1312.6026v5.pdf])

    Arguments:
        layer_sizes: A list of sizes of the hidden layers of the MLP.
        activation: The activation function to use in each layer.
        dropout_keep_prob: The dropout keep probability.
    """
    check_argument_types()

    def _projection(prev_state, prev_output, ctx_tensors, train_mode):
        mlp_input = tf.concat([prev_state, prev_output] + ctx_tensors, 1)

        return multilayer_projection(mlp_input, layer_sizes,
                                     activation=activation,
                                     dropout_keep_prob=dropout_keep_prob,
                                     train_mode=train_mode,
                                     scope="deep_output_mlp")

    return _projection, layer_sizes[-1]
Developer: ufal, Project: neuralmonkey, Lines: 26, Source: output_projection.py


Example 10: word2vec_vocabulary

def word2vec_vocabulary(w2v: Word2Vec) -> Vocabulary:
    """Return the vocabulary from a word2vec object.

    This is a helper method used from configuration.
    """
    check_argument_types()
    return w2v.vocabulary
Developer: ufal, Project: neuralmonkey, Lines: 7, Source: word2vec.py


Example 11: from_t2t_vocabulary

def from_t2t_vocabulary(path: str,
                        encoding: str = "utf-8") -> "Vocabulary":
    """Load a vocabulary generated during tensor2tensor training.

    Arguments:
        path: The path to the vocabulary file.
        encoding: The encoding of the vocabulary file (defaults to UTF-8).

    Returns:
        The new Vocabulary instance.
    """
    check_argument_types()
    vocabulary = []  # type: List[str]

    with open(path, encoding=encoding) as wordlist:
        for line in wordlist:
            line = line.strip()

            # T2T vocab tends to wrap words in single quotes
            if ((line.startswith("'") and line.endswith("'"))
                    or (line.startswith('"') and line.endswith('"'))):
                line = line[1:-1]

            if line in ["<pad>", "<EOS>"]:
                continue

            vocabulary.append(line)

    log("Vocabulary form wordlist loaded, containing {} words"
        .format(len(vocabulary)))
    log_sample(vocabulary)

    return Vocabulary(vocabulary)
Developer: ufal, Project: neuralmonkey, Lines: 33, Source: vocabulary.py


Example 12: linear_encoder_projection

def linear_encoder_projection(dropout_keep_prob: float) -> EncoderProjection:
    """Return a linear encoder projection.

    Return a projection function which applies dropout to the concatenated
    encoder final states and returns their linear projection to a tensor of
    size rnn_size.

    Arguments:
        dropout_keep_prob: The dropout keep probability
    """
    check_argument_types()

    def func(train_mode: tf.Tensor,
             rnn_size: int,
             encoders: List[Stateful]) -> tf.Tensor:

        if rnn_size is None:
            raise ValueError(
                "You must supply rnn_size for this type of encoder projection")

        en_concat = concat_encoder_projection(train_mode, None, encoders)

        return dropout(
            tf.layers.dense(en_concat, rnn_size, name="encoders_projection"),
            dropout_keep_prob, train_mode)

    return cast(EncoderProjection, func)
Developer: ufal, Project: neuralmonkey, Lines: 27, Source: encoder_projection.py


Example 13: __init__

    def __init__(self,
                 name: str,
                 input_shape: List[int],
                 data_id: str,
                 projection_dim: int = None,
                 ff_hidden_dim: int = None,
                 reuse: ModelPart = None,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        """Instantiate SpatialFiller.

        Args:
            name: Name of the model part.
            input_shape: Dimensionality of the input.
            data_id: Name of the data series with numpy objects.
            projection_dim: Optional, dimension of the states projection.
            ff_hidden_dim: Optional, hidden layer size of the feed-forward
                network applied to the projected states; requires
                projection_dim to be set.
        """
        check_argument_types()
        ModelPart.__init__(
            self, name, reuse, save_checkpoint, load_checkpoint, initializers)

        self.data_id = data_id
        self.input_shape = input_shape
        self.projection_dim = projection_dim
        self.ff_hidden_dim = ff_hidden_dim

        if self.ff_hidden_dim is not None and self.projection_dim is None:
            raise ValueError(
                "projection_dim must be provided when using ff_hidden_dim")

        if len(self.input_shape) != 3:
            raise ValueError("The input shape should have 3 dimensions.")
Developer: ufal, Project: neuralmonkey, Lines: 33, Source: numpy_stateful_filler.py


Example 14: __init__

    def __init__(self, n: int = 4,
                 deduplicate: bool = False,
                 name: str = None,
                 multiple_references_separator: str = None) -> None:
        """Instantiate BLEU evaluator.

        Args:
            n: Longest n-grams considered.
            deduplicate: Flag whether repeated tokens should be treated as
                one.
            name: Name displayed in the logs and TensorBoard.
            multiple_references_separator: Token that separates multiple
                reference sentences. If ``None``, it assumes the reference is
                one sentence only.
        """
        check_argument_types()

        if name is None:
            name = "BLEU-{}".format(n)
            if deduplicate:
                name += "-dedup"
        super().__init__(name)

        self.n = n
        self.deduplicate = deduplicate
        self.multiple_references_separator = multiple_references_separator
Developer: ufal, Project: neuralmonkey, Lines: 25, Source: bleu.py


Example 15: __init__

    def __init__(self,
                 name: str,
                 parent_decoder: AutoregressiveDecoder,
                 beam_size: int,
                 max_steps: int,
                 length_normalization: float) -> None:
        """Construct the beam search decoder graph.

        Arguments:
            name: The name for the model part.
            parent_decoder: An autoregressive decoder from which to sample.
            beam_size: The number of hypotheses in the beam.
            max_steps: The maximum number of time steps to perform.
            length_normalization: The alpha parameter from Eq. 14 in
                Wu et al. (2016).
        """
        check_argument_types()
        ModelPart.__init__(self, name)

        self.parent_decoder = parent_decoder
        self.beam_size = beam_size
        self.length_normalization = length_normalization
        self.max_steps_int = max_steps

        # Create a placeholder for the maximum number of steps, which is
        # necessary during ensembling, when the decoder is called repeatedly
        # with the max_steps attribute set to one.
        self.max_steps = tf.placeholder_with_default(self.max_steps_int, [])

        self._initial_loop_state = None  # type: Optional[BeamSearchLoopState]
Developer: ufal, Project: neuralmonkey, Lines: 29, Source: beam_search_decoder.py


Example 16: __init__

    def __init__(self,
                 name: str,
                 encoder: TemporalStateful,
                 vocabulary: Vocabulary,
                 data_id: str,
                 max_length: int = None,
                 merge_repeated_targets: bool = False,
                 merge_repeated_outputs: bool = True,
                 beam_width: int = 1,
                 reuse: ModelPart = None,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        check_argument_types()
        ModelPart.__init__(self, name, reuse, save_checkpoint, load_checkpoint,
                           initializers)

        self.encoder = encoder
        self.vocabulary = vocabulary
        self.data_id = data_id
        self.max_length = max_length

        self.merge_repeated_targets = merge_repeated_targets
        self.merge_repeated_outputs = merge_repeated_outputs
        self.beam_width = beam_width
Developer: ufal, Project: neuralmonkey, Lines: 25, Source: ctc_decoder.py


Example 17: __init__

    def __init__(self,
                 num_sessions: int,
                 num_threads: int,
                 save_n_best: int = 1,
                 minimize_metric: bool = False,
                 gpu_allow_growth: bool = True,
                 per_process_gpu_memory_fraction: float = 1.0,
                 enable_tf_debug: bool = False) -> None:
        """Initialize a TensorflowManager.

        At this moment the graph must already exist. This method creates the
        required number of TensorFlow sessions and initializes them from the
        provided variable files, if any are given.

        Args:
            num_sessions: Number of sessions to be initialized.
            num_threads: Number of threads sessions will run in.
            save_n_best: How many best models to keep.
            minimize_metric: Whether the best model is the one with the
                lowest or the highest score.
            gpu_allow_growth: TF to allocate incrementally, not all at once.
            per_process_gpu_memory_fraction: Limit TF memory use.
            enable_tf_debug: Wrap the sessions in the TF CLI debugger.
        """
        check_argument_types()

        self.session_cfg = tf.ConfigProto()
        self.session_cfg.inter_op_parallelism_threads = num_threads
        self.session_cfg.intra_op_parallelism_threads = num_threads
        self.session_cfg.allow_soft_placement = True  # needed for more GPUs
        # pylint: disable=no-member
        self.session_cfg.gpu_options.allow_growth = gpu_allow_growth
        self.session_cfg.gpu_options.per_process_gpu_memory_fraction = \
            per_process_gpu_memory_fraction
        # pylint: enable=no-member

        if save_n_best < 1:
            raise Exception("save_n_best parameter must be greater than zero")
        self.saver_max_to_keep = save_n_best
        self.minimize_metric = minimize_metric
        self.num_sessions = num_sessions

        self.sessions = [tf.Session(config=self.session_cfg)
                         for _ in range(self.num_sessions)]

        if enable_tf_debug:
            self.sessions = [tf_debug.LocalCLIDebugWrapperSession(sess)
                             for sess in self.sessions]

        self.saver = None

        self.best_score_index = None  # type: Optional[int]
        self.best_score_epoch = 0
        self.best_score_batch = 0

        init_score = np.inf if self.minimize_metric else -np.inf
        self.saved_scores = [init_score for _ in range(self.saver_max_to_keep)]
        self.best_score = init_score

        self.variables_files = []  # type: List[str]
        self._best_vars_file = None  # type: Optional[str]
Developer: ufal, Project: neuralmonkey, Lines: 60, Source: tf_manager.py


Example 18: __init__

    def __init__(self,
                 name: str,
                 smooth_method: str = "exp",
                 smooth_value: float = 0.0,
                 force: bool = False,
                 lowercase: bool = False,
                 tokenize: str = "none",
                 use_effective_order: bool = False) -> None:
        check_argument_types()
        super().__init__(name)

        if tokenize not in TOKENIZERS:
            raise ValueError(
                "Unknown tokenizer '{}'. You must use one of sacrebleu's "
                "tokenizers: {}".format(tokenize, str(TOKENIZERS)))

        if smooth_method not in SMOOTH_VARIANTS:
            raise ValueError(
                "Unknown smoothing '{}'. You must use one of sacrebleu's "
                "smoothing methods: {}".format(smooth_method,
                                               str(SMOOTH_VARIANTS)))

        self.smooth_method = smooth_method
        self.smooth_value = smooth_value
        self.force = force
        self.lowercase = lowercase
        self.tokenize = tokenize
        self.use_effective_order = use_effective_order
Developer: ufal, Project: neuralmonkey, Lines: 28, Source: sacrebleu.py


Example 19: maxout_output

def maxout_output(
        maxout_size: int,
        dropout_keep_prob: float = 1.0) -> Tuple[OutputProjection, int]:
    """Apply maxout.

    Compute RNN output out of the previous state and output, and the
    context tensors returned from attention mechanisms, as described
    in the article

    This function corresponds to the equations for computation the
    t_tilde in the Bahdanau et al. (2015) paper, on page 14,
    with the maxout projection, before the last linear projection.

    Arguments:
        maxout_size: The size of the hidden maxout layer in the deep output

    Returns:
        Returns the maxout projection of the concatenated inputs
    """
    check_argument_types()

    def _projection(prev_state, prev_output, ctx_tensors, train_mode):
        state_out_ctx = tf.concat([prev_state, prev_output] + ctx_tensors, 1)
        return dropout(
            maxout(state_out_ctx, maxout_size),
            dropout_keep_prob, train_mode)

    return _projection, maxout_size
Developer: ufal, Project: neuralmonkey, Lines: 28, Source: output_projection.py


Example 20: __init__

    def __init__(
            self,
            name: str,
            parent: TemporalStateful,
            factor: int,
            projection_size: int = None,
            projection_activation: Activation = None) -> None:
        """Initialize SentenceSplitter.

        Args:
            parent: TemporalStateful whose states will be split.
            factor: Factor by which the states will be split - the resulting
                sequence will be longer by this factor.
            projection_size: If not None, specifies dimensionality of a
                projection before state splitting.
            projection_activation: Non-linearity function for the optional
                projection.
        """
        check_argument_types()

        ModelPart.__init__(
            self, name=name, save_checkpoint=None, load_checkpoint=None,
            initializers=None)
        self.parent = parent
        self.factor = factor
        self.projection_size = projection_size
        self.activation = projection_activation

        if projection_size is not None and projection_size % factor != 0:
            raise ValueError((
                "Dimension of projection ({}) must be "
                "divisible by the given factor ({}).").format(
                    projection_size, factor))
Developer: ufal, Project: neuralmonkey, Lines: 33, Source: sequence_split.py



Note: The typeguard.check_argument_types examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with them; consult each project's license before redistributing or using the code. Please do not reproduce this article without permission.

