This page collects typical usage examples of the Python function tensorflow.variance_scaling_initializer. If you have been wondering what exactly variance_scaling_initializer does, or how to use it in practice, the hand-picked code examples below should help.
Twenty code examples of variance_scaling_initializer are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
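Before the examples, here is a minimal sketch of how such an initializer is typically attached to a variable in TensorFlow 1.x graph mode (the variable name and shape below are illustrative only, not taken from any of the cited projects):

import numpy as np
import tensorflow as tf

# He/Kaiming-style scaling (scale=2.0, fan_in), a configuration that
# several examples below also use.
init = tf.variance_scaling_initializer(scale=2.0, mode="fan_in",
                                       distribution="normal")

# Variables created with this initializer draw their starting values
# when the initializer op runs inside a session.
w = tf.get_variable("w", shape=[784, 256], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(np.std(sess.run(w)))  # on the order of sqrt(2 / 784)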
Example 1: get_variable_initializer
def get_variable_initializer(hparams):
  """Get variable initializer from hparams."""
  if not hparams.initializer:
    return None

  mlperf_log.transformer_print(key=mlperf_log.MODEL_HP_INITIALIZER_GAIN,
                               value=hparams.initializer_gain,
                               hparams=hparams)

  if not tf.contrib.eager.in_eager_mode():
    tf.logging.info("Using variable initializer: %s", hparams.initializer)
  if hparams.initializer == "orthogonal":
    return tf.orthogonal_initializer(gain=hparams.initializer_gain)
  elif hparams.initializer == "uniform":
    max_val = 0.1 * hparams.initializer_gain
    return tf.random_uniform_initializer(-max_val, max_val)
  elif hparams.initializer == "normal_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="normal")
  elif hparams.initializer == "uniform_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="uniform")
  elif hparams.initializer == "xavier":
    return tf.contrib.layers.xavier_initializer()
  else:
    raise ValueError("Unrecognized initializer: %s" % hparams.initializer)
Author: qixiuai | Project: tensor2tensor | Lines: 26 | Source file: optimize.py
Example 2: q_network
def q_network(X_state, name):
    inputs = X_state
    with tf.variable_scope(name) as scope:
        dense_outputs = tf.layers.dense(inputs, 100, tf.nn.relu,
                                        kernel_initializer=tf.variance_scaling_initializer())
        outputs = tf.layers.dense(dense_outputs, n_outputs,
                                  kernel_initializer=tf.variance_scaling_initializer())
    trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       scope=scope.name)
    trainable_vars_by_name = {var.name[len(scope.name):]: var
                              for var in trainable_vars}
    return outputs, trainable_vars_by_name
Author: sunmingtao | Project: sample-code | Lines: 8 | Source file: my-cart-pole.py
Example 3: _get_variable_initializer
def _get_variable_initializer(hparams):
  if hparams.initializer == "orthogonal":
    return tf.orthogonal_initializer(gain=hparams.initializer_gain)
  elif hparams.initializer == "uniform":
    max_val = 0.1 * hparams.initializer_gain
    return tf.random_uniform_initializer(-max_val, max_val)
  elif hparams.initializer == "normal_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="normal")
  elif hparams.initializer == "uniform_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="uniform")
  else:
    raise ValueError("Unrecognized initializer: %s" % hparams.initializer)
Author: zeyu-h | Project: tensor2tensor | Lines: 14 | Source file: model_builder.py
Example 4: get_logits
def get_logits(self, image):
    gauss_init = tf.random_normal_initializer(stddev=0.01)
    with argscope(Conv2D,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.)), \
            argscope([Conv2D, FullyConnected], activation=tf.nn.relu), \
            argscope([Conv2D, MaxPooling], data_format='channels_last'):
        # necessary padding to get 55x55 after conv1
        image = tf.pad(image, [[0, 0], [2, 2], [2, 2], [0, 0]])
        l = Conv2D('conv1', image, filters=96, kernel_size=11, strides=4, padding='VALID')
        # size: 55
        visualize_conv1_weights(l.variables.W)
        l = tf.nn.lrn(l, 2, bias=1.0, alpha=2e-5, beta=0.75, name='norm1')
        l = MaxPooling('pool1', l, 3, strides=2, padding='VALID')
        # 27
        l = Conv2D('conv2', l, filters=256, kernel_size=5, split=2)
        l = tf.nn.lrn(l, 2, bias=1.0, alpha=2e-5, beta=0.75, name='norm2')
        l = MaxPooling('pool2', l, 3, strides=2, padding='VALID')
        # 13
        l = Conv2D('conv3', l, filters=384, kernel_size=3)
        l = Conv2D('conv4', l, filters=384, kernel_size=3, split=2)
        l = Conv2D('conv5', l, filters=256, kernel_size=3, split=2)
        l = MaxPooling('pool3', l, 3, strides=2, padding='VALID')

        l = FullyConnected('fc6', l, 4096,
                           kernel_initializer=gauss_init,
                           bias_initializer=tf.ones_initializer())
        l = Dropout(l, rate=0.5)
        l = FullyConnected('fc7', l, 4096, kernel_initializer=gauss_init)
        l = Dropout(l, rate=0.5)
        logits = FullyConnected('fc8', l, 1000, kernel_initializer=gauss_init)
    return logits
Author: quanlzheng | Project: tensorpack | Lines: 31 | Source file: alexnet.py
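Several of the tensorpack examples on this page (Examples 4, 5, 7, 12, and 18) lean on argscope, which installs default keyword arguments for the listed layer functions inside the with block. A minimal sketch of the pattern, assuming tensorpack's keyword-style Conv2D API (the input shape and layer names below are illustrative only):

import tensorflow as tf
from tensorpack import *  # provides Conv2D and argscope

image = tf.placeholder(tf.float32, [None, 224, 224, 3])
with argscope(Conv2D, kernel_initializer=tf.variance_scaling_initializer(scale=2.)):
    # Both convolutions pick up the kaiming-style initializer by default;
    # an explicit kernel_initializer argument would still override it.
    l = Conv2D('conv_a', image, filters=32, kernel_size=3)
    l = Conv2D('conv_b', l, filters=64, kernel_size=3)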
Example 5: build_graph
def build_graph(self, image, label):
    image = image_preprocess(image, bgr=True)
    image = tf.transpose(image, [0, 3, 1, 2])

    cfg = {
        18: ([2, 2, 2, 2], preresnet_basicblock),
        34: ([3, 4, 6, 3], preresnet_basicblock),
    }
    defs, block_func = cfg[DEPTH]

    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')), \
            argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format='channels_first'):
        convmaps = (LinearWrap(image)
                    .Conv2D('conv0', 64, 7, strides=2, activation=BNReLU)
                    .MaxPooling('pool0', 3, strides=2, padding='SAME')
                    .apply2(preresnet_group, 'group0', block_func, 64, defs[0], 1)
                    .apply2(preresnet_group, 'group1', block_func, 128, defs[1], 2)
                    .apply2(preresnet_group, 'group2', block_func, 256, defs[2], 2)
                    .apply2(preresnet_group, 'group3new', block_func, 512, defs[3], 1)())
        print(convmaps)
        convmaps = GlobalAvgPooling('gap', convmaps)
        logits = FullyConnected('linearnew', convmaps, 1000)

    loss = compute_loss_and_error(logits, label)
    wd_cost = regularize_cost('.*/W', l2_regularizer(1e-4), name='l2_regularize_loss')
    add_moving_summary(loss, wd_cost)
    return tf.add_n([loss, wd_cost], name='cost')
Author: quanlzheng | Project: tensorpack | Lines: 28 | Source file: CAM-resnet.py
Example 6: additive_attention
def additive_attention(a, b, a_lengths, b_lengths, max_seq_len, hidden_units=150,
                       scope='additive-attention', reuse=False):
    """
    For sequences a and b of lengths a_lengths and b_lengths, computes an attention matrix attn,
    where attn(i, j) = dot(v, tanh(W*a_i + W*b_j)). v is a learnable vector and W is a learnable
    matrix. The rows of attn are softmax normalized.

    Args:
        a: Input sequence a. Tensor of shape [batch_size, max_seq_len, input_size].
        b: Input sequence b. Tensor of shape [batch_size, max_seq_len, input_size].
        a_lengths: Lengths of sequences in a. Tensor of shape [batch_size].
        b_lengths: Lengths of sequences in b. Tensor of shape [batch_size].
        max_seq_len: Length of padded sequences a and b. Integer.
        hidden_units: Number of hidden units. Integer.

    Returns:
        Attention matrix. Tensor of shape [max_seq_len, max_seq_len].
    """
    with tf.variable_scope(scope, reuse=reuse):
        aW = time_distributed_dense_layer(a, hidden_units, bias=False, scope='dense', reuse=False)
        bW = time_distributed_dense_layer(b, hidden_units, bias=False, scope='dense', reuse=True)
        aW = tf.expand_dims(aW, 2)
        bW = tf.expand_dims(bW, 1)
        v = tf.get_variable(
            name='dot_weights',
            initializer=tf.variance_scaling_initializer(),
            shape=[hidden_units]
        )
        logits = tf.einsum('ijkl,l->ijk', tf.nn.tanh(aW + bW), v)
        logits = logits - tf.expand_dims(tf.reduce_max(logits, axis=2), 2)
        attn = tf.exp(logits)
        attn = mask_attention_weights(attn, a_lengths, b_lengths, max_seq_len)
        return attn / tf.expand_dims(tf.reduce_sum(attn, axis=2) + 1e-10, 2)
Author: charlesjansen | Project: quora-duplicate-questions | Lines: 34 | Source file: attend.py
Example 7: backbone_scope
def backbone_scope(freeze):
    """
    Args:
        freeze (bool): whether to freeze all the variables under the scope
    """
    def nonlin(x):
        x = get_norm()(x)
        return tf.nn.relu(x)

    with argscope([Conv2D, MaxPooling, BatchNorm], data_format='channels_first'), \
            argscope(Conv2D, use_bias=False, activation=nonlin,
                     kernel_initializer=tf.variance_scaling_initializer(
                         scale=2.0, mode='fan_out')), \
            ExitStack() as stack:
        if cfg.BACKBONE.NORM in ['FreezeBN', 'SyncBN']:
            if freeze or cfg.BACKBONE.NORM == 'FreezeBN':
                stack.enter_context(argscope(BatchNorm, training=False))
            else:
                stack.enter_context(argscope(
                    BatchNorm, sync_statistics='nccl' if cfg.TRAINER == 'replicated' else 'horovod'))

        if freeze:
            stack.enter_context(freeze_variables(stop_gradient=False, skip_collection=True))
        else:
            # The layers are not completely frozen, but we may want to freeze only the affine part.
            if cfg.BACKBONE.FREEZE_AFFINE:
                stack.enter_context(custom_getter_scope(freeze_affine_getter))
        yield
Author: quanlzheng | Project: tensorpack | Lines: 28 | Source file: basemodel.py
Example 8: conv2d_fixed_padding
def conv2d_fixed_padding(inputs,
                         filters,
                         kernel_size,
                         strides,
                         data_format="channels_first"):
  """Strided 2-D convolution with explicit padding.

  The padding is consistent and is based only on `kernel_size`, not on the
  dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).

  Args:
    inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
    filters: `int` number of filters in the convolution.
    kernel_size: `int` size of the kernel to be used in the convolution.
    strides: `int` strides of the convolution.
    data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.

  Returns:
    A `Tensor` of shape `[batch, filters, height_out, width_out]`.
  """
  if strides > 1:
    inputs = fixed_padding(inputs, kernel_size, data_format=data_format)

  return tf.layers.conv2d(
      inputs=inputs,
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=("SAME" if strides == 1 else "VALID"),
      use_bias=False,
      kernel_initializer=tf.variance_scaling_initializer(),
      data_format=data_format)
Author: kltony | Project: tensor2tensor | Lines: 33 | Source file: resnet.py
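Examples 8 and 20 call a fixed_padding helper defined elsewhere in their respective resnet.py files. For context, here is a sketch of what that helper conventionally does in the TensorFlow official ResNet code these snippets derive from (treat the details as an assumption rather than the exact code of either project):

def fixed_padding(inputs, kernel_size, data_format="channels_first"):
  """Pads the input along height and width based only on `kernel_size`,
  independently of the input's spatial dimensions."""
  pad_total = kernel_size - 1
  pad_beg = pad_total // 2
  pad_end = pad_total - pad_beg
  if data_format == "channels_first":
    return tf.pad(inputs, [[0, 0], [0, 0],
                           [pad_beg, pad_end], [pad_beg, pad_end]])
  return tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
                         [pad_beg, pad_end], [0, 0]])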
Example 9: __call__
def __call__(self, inputs, targets=None):
  """Calculate target logits or inferred target sequences.

  Args:
    inputs: int tensor with shape [batch_size, input_length].
    targets: None or int tensor with shape [batch_size, target_length].

  Returns:
    If targets is defined, return logits for each word in the target
    sequence, a float tensor with shape [batch_size, target_length, vocab_size].
    If targets is None, generate the output sequence one token at a time and
    return a dictionary {
        output: [batch_size, decoded_length]
        score: [batch_size, float]}
  """
  # Variance scaling is used here because it seems to work in many problems.
  # Other reasonable initializers may also work just as well.
  initializer = tf.variance_scaling_initializer(
      self.params.initializer_gain, mode="fan_avg", distribution="uniform")
  with tf.variable_scope("Transformer", initializer=initializer):
    # Calculate attention bias for encoder self-attention and decoder
    # multi-headed attention layers.
    attention_bias = model_utils.get_padding_bias(inputs)

    # Run the inputs through the encoder layer to map the symbol
    # representations to continuous representations.
    encoder_outputs = self.encode(inputs, attention_bias)

    # Generate output sequence if targets is None, or return logits if target
    # sequence is known.
    if targets is None:
      return self.predict(encoder_outputs, attention_bias)
    else:
      logits = self.decode(targets, encoder_outputs, attention_bias)
      return logits
Author: cybermaster | Project: reference | Lines: 35 | Source file: transformer.py
Example 10: _fully_connected
def _fully_connected(self, x, out_dim):
  w = tf.get_variable(
      'DW', [x.get_shape()[1], out_dim],
      initializer=tf.variance_scaling_initializer(distribution='uniform'))
  b = tf.get_variable(
      'biases', [out_dim], initializer=tf.constant_initializer())
  return tf.nn.xw_plus_b(x, w, b)
Author: 812864539 | Project: models | Lines: 7 | Source file: embedders.py
Example 11: output
def output(self) -> tf.Tensor:
    pooled_outputs = []
    for filter_size, num_filters in self.filters:
        with tf.variable_scope("conv-maxpool-%s" % filter_size):
            # Convolution layer
            filter_shape = [filter_size, self.embedding_size, num_filters]
            w_filter = get_variable(
                "conv_W", filter_shape,
                initializer=tf.variance_scaling_initializer(
                    mode="fan_avg", distribution="uniform"))
            b_filter = get_variable(
                "conv_bias", [num_filters],
                initializer=tf.zeros_initializer())
            conv = tf.nn.conv1d(
                self.embedded_inputs,
                w_filter,
                stride=1,
                padding="VALID",
                name="conv")

            # Apply nonlinearity
            conv_relu = tf.nn.relu(tf.nn.bias_add(conv, b_filter))

            # Max-pooling over the outputs
            pooled = tf.reduce_max(conv_relu, 1)
            pooled_outputs.append(pooled)

    # Combine all the pooled features
    return tf.concat(pooled_outputs, axis=1)
Author: ufal | Project: neuralmonkey | Lines: 29 | Source file: sequence_cnn_encoder.py
Example 12: build_graph
def build_graph(self, image, label):
    assert tf.test.is_gpu_available()

    MEAN_IMAGE = tf.constant([0.4914, 0.4822, 0.4465], dtype=tf.float32)
    STD_IMAGE = tf.constant([0.2023, 0.1994, 0.2010], dtype=tf.float32)
    image = ((image / 255.0) - MEAN_IMAGE) / STD_IMAGE
    image = tf.transpose(image, [0, 3, 1, 2])

    pytorch_default_init = tf.variance_scaling_initializer(
        scale=1.0 / 3, mode='fan_in', distribution='uniform')
    with argscope([Conv2D, BatchNorm, GlobalAvgPooling], data_format='channels_first'), \
            argscope(Conv2D, kernel_initializer=pytorch_default_init):
        net = Conv2D('conv0', image, 64, kernel_size=3, strides=1, use_bias=False)
        for i, blocks_in_module in enumerate(MODULE_SIZES):
            for j in range(blocks_in_module):
                stride = 2 if j == 0 and i > 0 else 1
                with tf.variable_scope("res%d.%d" % (i, j)):
                    net = preactivation_block(net, FILTER_SIZES[i], stride)
        net = GlobalAvgPooling('gap', net)
        logits = FullyConnected('linear', net, CLASS_NUM,
                                kernel_initializer=tf.random_normal_initializer(stddev=1e-3))

    ce_cost = tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits)
    ce_cost = tf.reduce_mean(ce_cost, name='cross_entropy_loss')
    single_label = tf.to_int32(tf.argmax(label, axis=1))
    wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, single_label, 1)), name='wrong_vector')
    # monitor training error
    add_moving_summary(tf.reduce_mean(wrong, name='train_error'), ce_cost)
    add_param_summary(('.*/W', ['histogram']))

    # weight decay on all W matrices, including convolutional layers
    wd_cost = tf.multiply(WEIGHT_DECAY, regularize_cost('.*', tf.nn.l2_loss), name='wd_cost')
    return tf.add_n([ce_cost, wd_cost], name='cost')
Author: quanlzheng | Project: tensorpack | Lines: 34 | Source file: cifar10-preact18-mixup.py
Example 13: __init__
def __init__(self,
             name: str,
             n_heads: int,
             keys_encoder: Attendable,
             values_encoder: Attendable = None,
             dropout_keep_prob: float = 1.0,
             reuse: ModelPart = None,
             save_checkpoint: str = None,
             load_checkpoint: str = None,
             initializers: InitializerSpecs = None) -> None:
    check_argument_types()
    BaseAttention.__init__(self, name, reuse, save_checkpoint,
                           load_checkpoint, initializers)

    self.n_heads = n_heads
    self.dropout_keep_prob = dropout_keep_prob
    self.keys_encoder = keys_encoder

    if values_encoder is not None:
        self.values_encoder = values_encoder
    else:
        self.values_encoder = self.keys_encoder

    if self.n_heads <= 0:
        raise ValueError("Number of heads must be greater than zero.")

    if self.dropout_keep_prob <= 0.0 or self.dropout_keep_prob > 1.0:
        raise ValueError("Dropout keep prob must be inside (0,1].")

    self._variable_scope.set_initializer(tf.variance_scaling_initializer(
        mode="fan_avg", distribution="uniform"))
Author: ufal | Project: neuralmonkey | Lines: 32 | Source file: scaled_dot_product.py
Example 14: get_tf_initializer
def get_tf_initializer(name="glorot"):
    if name == "const":
        return tf.constant_initializer(0.3)
    elif name == "glorot":
        return tf.variance_scaling_initializer(
            scale=1.0, mode="fan_avg", distribution="normal")
    elif name == "normal":
        return tf.truncated_normal_initializer(dtype=tf.float32, stddev=0.36)
Author: q64545 | Project: x-deeplearning | Lines: 8 | Source file: utils.py
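As a side note, the "glorot" branch above matches Glorot/Xavier normal initialization: scale=1.0 with mode="fan_avg" averages fan_in and fan_out, which is what tf.glorot_normal_initializer computes. A quick check, offered as a sketch (the matrix shape is arbitrary, and truncation of the normal draw means the two standard deviations match each other only approximately against sqrt(2 / (fan_in + fan_out))):

import numpy as np
import tensorflow as tf

vs_init = tf.variance_scaling_initializer(scale=1.0, mode="fan_avg",
                                          distribution="normal")
glorot_init = tf.glorot_normal_initializer()

with tf.Session() as sess:
    w_vs = sess.run(vs_init([1000, 1000]))
    w_glorot = sess.run(glorot_init([1000, 1000]))
    # Both should print roughly the same value, near sqrt(2 / 2000).
    print(np.std(w_vs), np.std(w_glorot))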
Example 15: q_network
def q_network(state_tensor):
    inputs = state_tensor
    conv_outputs1 = tf.layers.conv2d(inputs, filters=32, kernel_size=(8, 8), strides=4, padding='same',
                                     activation=tf.nn.relu, kernel_initializer=tf.variance_scaling_initializer())
    conv_outputs2 = tf.layers.conv2d(conv_outputs1, filters=64, kernel_size=(4, 4), strides=2, padding='same',
                                     activation=tf.nn.relu, kernel_initializer=tf.variance_scaling_initializer())
    conv_outputs3 = tf.layers.conv2d(conv_outputs2, filters=64, kernel_size=(3, 3), strides=1, padding='same',
                                     activation=tf.nn.relu, kernel_initializer=tf.variance_scaling_initializer())
    flat_outputs = tf.reshape(conv_outputs3, shape=[-1, n_hidden_in])
    dense_outputs = tf.layers.dense(flat_outputs, n_hidden, activation=tf.nn.relu,
                                    kernel_initializer=tf.variance_scaling_initializer())
    outputs = tf.layers.dense(dense_outputs, n_outputs, kernel_initializer=tf.variance_scaling_initializer())
    return outputs
Author: sunmingtao | Project: sample-code | Lines: 9 | Source file: my-pacman-tensorflow-001.py
Example 16: Deconv2D
def Deconv2D(x, out_channel, kernel_shape,
             stride, padding='SAME',
             W_init=None, b_init=None,
             nl=tf.identity, use_bias=True,
             data_format='NHWC'):
    """
    2D deconvolution on 4D inputs.

    Args:
        x (tf.Tensor): a tensor of shape NHWC.
            Must have a known number of channels, but can have other unknown dimensions.
        out_channel: the number of output channels.
        kernel_shape: (h, w) tuple or an int.
        stride: (h, w) tuple or an int.
        padding (str): 'valid' or 'same'. Case insensitive.
        W_init: initializer for W. Defaults to `tf.variance_scaling_initializer(2.0)`, i.e. kaiming-normal.
        b_init: initializer for b. Defaults to zero.
        nl: a nonlinearity function.
        use_bias (bool): whether to use bias.

    Returns:
        tf.Tensor: a NHWC tensor named ``output`` with attribute `variables`.

    Variable Names:
    * ``W``: weights
    * ``b``: bias
    """
    in_shape = x.get_shape().as_list()
    channel_axis = 3 if data_format == 'NHWC' else 1
    in_channel = in_shape[channel_axis]
    assert in_channel is not None, "[Deconv2D] Input cannot have unknown channel!"
    assert isinstance(out_channel, int), out_channel

    if W_init is None:
        W_init = tf.variance_scaling_initializer(scale=2.0)
    if b_init is None:
        b_init = tf.constant_initializer()

    with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
        layer = tf.layers.Conv2DTranspose(
            out_channel, kernel_shape,
            strides=stride, padding=padding,
            data_format='channels_last' if data_format == 'NHWC' else 'channels_first',
            activation=lambda x: nl(x, name='output'),
            use_bias=use_bias,
            kernel_initializer=W_init,
            bias_initializer=b_init,
            trainable=True)
        ret = layer.apply(x, scope=tf.get_variable_scope())

    ret.variables = VariableHolder(W=layer.kernel)
    if use_bias:
        ret.variables.b = layer.bias
    return ret
Author: caserzer | Project: tensorpack | Lines: 56 | Source file: conv2d.py
Example 17: get_variable_initializer
def get_variable_initializer(hparams):
  """Get variable initializer from hparams."""
  if not hparams.initializer:
    return None
  tf.logging.info("Using variable initializer: %s", hparams.initializer)
  if hparams.initializer == "orthogonal":
    return tf.orthogonal_initializer(gain=hparams.initializer_gain)
  elif hparams.initializer == "uniform":
    max_val = 0.1 * hparams.initializer_gain
    return tf.random_uniform_initializer(-max_val, max_val)
  elif hparams.initializer == "normal_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="normal")
  elif hparams.initializer == "uniform_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="uniform")
  else:
    raise ValueError("Unrecognized initializer: %s" % hparams.initializer)
Author: chqiwang | Project: tensor2tensor | Lines: 19 | Source file: optimize.py
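A minimal usage sketch for this helper: the returned initializer is typically installed as the default initializer of a variable scope, so every variable created inside picks it up. The hparams stand-in below is hypothetical (the real tensor2tensor HParams object carries many more fields):

import tensorflow as tf

class FakeHParams(object):
    # Hypothetical stand-in exposing only the two fields the helper reads.
    initializer = "uniform_unit_scaling"
    initializer_gain = 1.0

init = get_variable_initializer(FakeHParams())
with tf.variable_scope("body", initializer=init):
    # Created without an explicit initializer, so it inherits the
    # scope's variance-scaling initializer.
    w = tf.get_variable("w", shape=[512, 512])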
Example 18: fastrcnn_Xconv1fc_head
def fastrcnn_Xconv1fc_head(feature, num_classes, num_convs):
    """
    Args:
        feature (any shape):
        num_classes (int): num_category + 1
        num_convs (int): number of conv layers

    Returns:
        cls_logits (N x num_class), reg_logits (N x num_class-1 x 4)
    """
    l = feature
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out', distribution='normal')):
        for k in range(num_convs):
            l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu)
        l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM,
                           kernel_initializer=tf.variance_scaling_initializer(), activation=tf.nn.relu)
    return fastrcnn_outputs('outputs', l, num_classes)
Author: tobyma | Project: tensorpack | Lines: 19 | Source file: model.py
Example 19: embedded_inputs
def embedded_inputs(self) -> tf.Tensor:
    with tf.variable_scope("input_projection"):
        embedding_matrix = get_variable(
            "word_embeddings",
            [len(self.vocabulary), self.embedding_size],
            initializer=tf.variance_scaling_initializer(
                mode="fan_avg", distribution="uniform"))

        return dropout(
            tf.nn.embedding_lookup(embedding_matrix, self.inputs),
            self.dropout_keep_prob,
            self.train_mode)
Author: ufal | Project: neuralmonkey | Lines: 11 | Source file: sequence_cnn_encoder.py
Example 20: conv2d_fixed_padding
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
  """Strided 2-D convolution with explicit padding."""
  # The padding is consistent and is based only on `kernel_size`, not on the
  # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
  if strides > 1:
    inputs = fixed_padding(inputs, kernel_size, data_format)

  return tf.layers.conv2d(
      inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
      padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
      kernel_initializer=tf.variance_scaling_initializer(),
      data_format=data_format)
Author: seasky100 | Project: crnn | Lines: 12 | Source file: resnet.py
Note: The tensorflow.variance_scaling_initializer examples on this page were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce without permission.