This article collects typical usage examples of the `tensorflow.python.ops.math_ops.floor` function in Python. If you have been wondering what exactly `floor` does, how to use it, or what real code that calls it looks like, the curated examples below should help.
Below are 20 code examples of the `floor` function, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
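Before the examples, a quick note on the op itself: `math_ops.floor` rounds each element of a tensor toward negative infinity and returns a tensor of the same shape and dtype; it is the internal op behind the public `tf.floor`. A minimal sketch (assuming an installed TensorFlow; under the graph-mode TF 1.x that several examples below target, you would evaluate the result inside a session):

```python
import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([-1.7, -0.2, 0.2, 1.7, 2.0])
y = math_ops.floor(x)  # equivalent to tf.floor(x)
print(y)               # [-2. -1.  0.  1.  2.] under eager execution
```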
Example 1: compute_step

def compute_step(x_val, geometric=False):
  if geometric:
    # Consider the geometric series where t_mul != 1:
    # 1 + t_mul + t_mul^2 ... = (1 - t_mul^i_restart) / (1 - t_mul)
    # First find how many restarts were performed for a given x_val, i.e.
    # the maximal integer i_restart for which this inequality holds:
    # x_val >= (1 - t_mul^i_restart) / (1 - t_mul)
    # x_val * (1 - t_mul) <= (1 - t_mul^i_restart)
    # t_mul^i_restart <= (1 - x_val * (1 - t_mul))
    # tensorflow allows only log with base e, so:
    # i_restart <= log(1 - x_val * (1 - t_mul)) / log(t_mul)
    # Find how many restarts were performed
    i_restart = math_ops.floor(
        math_ops.log(c_one - x_val * (c_one - t_mul)) / math_ops.log(t_mul))
    # Compute the sum of all restarts before the current one
    sum_r = (c_one - t_mul ** i_restart) / (c_one - t_mul)
    # Compute our position within the current restart
    x_val = (x_val - sum_r) / t_mul ** i_restart
  else:
    # Find how many restarts were performed
    i_restart = math_ops.floor(x_val)
    # Compute our position within the current restart
    x_val = x_val - i_restart
  return i_restart, x_val

Author: 1000sprites, Project: tensorflow, Lines: 28, Source: sgdr_learning_rate_decay.py
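The closed form in the comments can be sanity-checked numerically. The sketch below (plain NumPy, with assumed values `t_mul = 2.0` and `x_val = 5.5`) compares `floor(log(1 - x_val * (1 - t_mul)) / log(t_mul))` against an explicit search over restart boundaries:

```python
import numpy as np

t_mul, x_val = 2.0, 5.5  # assumed: doubling restart lengths, 5.5 periods elapsed
# Closed form from the comments in Example 1:
i_restart = np.floor(np.log(1.0 - x_val * (1.0 - t_mul)) / np.log(t_mul))
# Explicit check: restart i starts at 1 + t_mul + ... + t_mul^(i-1).
starts = np.cumsum([0.0] + [t_mul ** i for i in range(5)])  # [0, 1, 3, 7, 15, 31]
assert i_restart == np.searchsorted(starts, x_val, side='right') - 1  # both give 2
```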
Example 2: compute_step

def compute_step(completed_fraction, geometric=False):
  if geometric:
    i_restart = math_ops.floor(math_ops.log(1.0 - completed_fraction * (
        1.0 - t_mul)) / math_ops.log(t_mul))
    sum_r = (1.0 - t_mul ** i_restart) / (1.0 - t_mul)
    completed_fraction = (completed_fraction - sum_r) / t_mul ** i_restart
  else:
    i_restart = math_ops.floor(completed_fraction)
    completed_fraction = completed_fraction - i_restart
  return i_restart, completed_fraction

Author: Lin-jipeng, Project: tensorflow, Lines: 13, Source: learning_rate_decay.py
Example 3: compute_step

def compute_step(completed_fraction, geometric=False):
  """Helper for `cond` operation."""
  if geometric:
    i_restart = math_ops.floor(
        math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /
        math_ops.log(t_mul))
    sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
    completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart
  else:
    i_restart = math_ops.floor(completed_fraction)
    completed_fraction -= i_restart
  return i_restart, completed_fraction

Author: terrytangyuan, Project: tensorflow, Lines: 15, Source: learning_rate_schedule.py
Example 4: dropout_selu_impl

def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
    # Note: `tf`, `fixedPointMean` and `fixedPointVar` come from the enclosing
    # dropout_selu() wrapper in selu.py; they are the SELU fixed-point mean
    # and variance (0.0 and 1.0) used to rescale the output below.
    keep_prob = 1.0 - rate
    x = ops.convert_to_tensor(x, name="x")
    if isinstance(keep_prob, numbers.Real) and not 0. < keep_prob <= 1.:
        raise ValueError("keep_prob must be a scalar tensor or a float in the "
                         "range (0, 1], got %g" % keep_prob)
    keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
    keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
    alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
    alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

    if tensor_util.constant_value(keep_prob) == 1:
        return x

    noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
    # floor(keep_prob + U[0, 1)) yields a 0/1 mask that is 1 w.p. keep_prob.
    random_tensor = keep_prob
    random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
    binary_tensor = math_ops.floor(random_tensor)
    # Dropped units are set to alpha (not zero); the affine correction (a, b)
    # then restores the activations' fixed-point mean and variance.
    ret = x * binary_tensor + alpha * (1 - binary_tensor)
    a = tf.sqrt(fixedPointVar / (keep_prob * ((1 - keep_prob) * tf.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
    b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
    ret = a * ret + b
    ret.set_shape(x.get_shape())
    return ret

Author: waxz, Project: ppo_torcs, Lines: 27, Source: selu.py
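The affine correction `(a, b)` is what distinguishes this "alpha dropout" from plain dropout: dropped units are set to `alpha` rather than zero, and `a`, `b` are chosen so the output keeps the SELU fixed-point mean and variance. A rough NumPy check of that claim (values assumed: `alpha` is the SELU constant, `keep_prob = 0.9`, fixed point mean 0 and variance 1):

```python
import numpy as np

rng = np.random.default_rng(0)
alpha, keep_prob = -1.7580993408473766, 0.9
mean, var = 0.0, 1.0  # SELU fixed point
x = rng.standard_normal(1_000_000)                # activations at the fixed point
mask = np.floor(keep_prob + rng.random(x.shape))  # 0/1, same trick as above
ret = x * mask + alpha * (1 - mask)
a = np.sqrt(var / (keep_prob * ((1 - keep_prob) * (alpha - mean) ** 2 + var)))
b = mean - a * (keep_prob * mean + (1 - keep_prob) * alpha)
ret = a * ret + b
print(ret.mean(), ret.var())  # both close to 0.0 and 1.0
```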
Example 5: _log_unnormalized_prob

def _log_unnormalized_prob(self, x):
  if self.validate_args:
    x = distribution_util.embed_check_nonnegative_integer_form(x)
  else:
    # For consistency with cdf, we take the floor.
    x = math_ops.floor(x)
  return x * self.log_rate - math_ops.lgamma(1. + x)

Author: AbhinavJain13, Project: tensorflow, Lines: 7, Source: poisson.py
Example 6: _log_cdf

def _log_cdf(self, y):
  low = self._low
  high = self._high
  # Recall the promise:
  # cdf(y) := P[Y <= y]
  #         = 1, if y >= high,
  #         = 0, if y < low,
  #         = P[X <= y], otherwise.
  # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
  # between.
  j = math_ops.floor(y)
  result_so_far = self.distribution.log_cdf(j)
  # Broadcast, because it's possible that this is a single distribution being
  # evaluated on a number of samples, or something like that.
  j += array_ops.zeros_like(result_so_far)
  # Re-define values at the cutoffs.
  if low is not None:
    neg_inf = -np.inf * array_ops.ones_like(result_so_far)
    result_so_far = array_ops.where(j < low, neg_inf, result_so_far)
  if high is not None:
    result_so_far = array_ops.where(j >= high,
                                    array_ops.zeros_like(result_so_far),
                                    result_so_far)
  return result_so_far

Author: finardi, Project: tensorflow, Lines: 30, Source: quantized_distribution.py
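The `floor` here encodes a general fact about integer-valued distributions: the CDF is flat between integers, so evaluating it at `y` and at `floor(y)` gives the same number. A one-line SciPy illustration (SciPy assumed available, a Poisson standing in for `self.distribution`):

```python
from scipy import stats

d = stats.poisson(4.0)
assert d.cdf(3.7) == d.cdf(3.0)  # cdf(y) == cdf(floor(y)) for integer support
```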
Example 7: exponential_decay

def exponential_decay(learning_rate, global_step, decay_steps, decay_rate,
                      staircase=False, name=None):
  """Applies exponential decay to the learning rate.

  When training a model, it is often recommended to lower the learning rate as
  the training progresses. This function applies an exponential decay function
  to a provided initial learning rate. It requires a `global_step` value to
  compute the decayed learning rate. You can just pass a TensorFlow variable
  that you increment at each training step.

  The function returns the decayed learning rate. It is computed as:

  ```python
  decayed_learning_rate = learning_rate *
                          decay_rate ^ (global_step / decay_steps)
  ```

  If the argument `staircase` is `True`, then `global_step / decay_steps` is an
  integer division and the decayed learning rate follows a staircase function.

  Example: decay every 100000 steps with a base of 0.96:

  ```python
  ...
  global_step = tf.Variable(0, trainable=False)
  starter_learning_rate = 0.1
  learning_rate = tf.exponential_decay(starter_learning_rate, global_step,
                                       100000, 0.96, staircase=True)
  optimizer = tf.GradientDescent(learning_rate)
  # Passing global_step to minimize() will increment it at each step.
  optimizer.minimize(...my loss..., global_step=global_step)
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a
      Python number. The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Must be positive. See the decay computation above.
    decay_rate: A scalar `float32` or `float64` `Tensor` or a
      Python number. The decay rate.
    staircase: Boolean. If `True`, decay the learning rate at discrete
      intervals.
    name: String. Optional name of the operation. Defaults to
      'ExponentialDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate`. The decayed
    learning rate.
  """
  with ops.op_scope([learning_rate, global_step, decay_steps, decay_rate],
                    name, "ExponentialDecay") as name:
    learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    dtype = learning_rate.dtype
    global_step = math_ops.cast(global_step, dtype)
    decay_steps = math_ops.cast(decay_steps, dtype)
    decay_rate = math_ops.cast(decay_rate, dtype)
    p = global_step / decay_steps
    if staircase:
      p = math_ops.floor(p)
    return math_ops.mul(learning_rate, math_ops.pow(decay_rate, p), name=name)

Author: bradg19, Project: tensor, Lines: 60, Source: learning_rate_decay.py
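`floor` is what implements the `staircase` option: flooring `global_step / decay_steps` keeps the exponent constant for `decay_steps` steps at a time, so the learning rate drops in discrete jumps instead of continuously. A plain-Python sketch with assumed hyperparameters:

```python
import math

lr0, decay_rate, decay_steps = 0.1, 0.96, 100  # assumed values
for step in (0, 50, 100, 150, 200):
    p = step / decay_steps
    smooth = lr0 * decay_rate ** p             # staircase=False
    stair = lr0 * decay_rate ** math.floor(p)  # staircase=True
    print(step, round(smooth, 5), round(stair, 5))
# The staircase rate holds at 0.1 until step 100, then jumps down to 0.096.
```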
Example 8: _cdf

def _cdf(self, x):
  if self.validate_args:
    # We set `check_integer=False` since the CDF is defined on the whole real
    # line.
    x = distribution_util.embed_check_nonnegative_discrete(
        x, check_integer=False)
  return math_ops.igammac(math_ops.floor(x + 1), self.rate)

Author: arnonhongklay, Project: tensorflow, Lines: 7, Source: poisson.py
Example 9: testChi2WithAbsDf

def testChi2WithAbsDf(self):
  with self.cached_session():
    df_v = np.array([-1.3, -3.2, 5], dtype=np.float64)
    chi2 = chi2_lib.Chi2WithAbsDf(df=df_v)
    self.assertAllClose(
        math_ops.floor(math_ops.abs(df_v)).eval(),
        chi2.df.eval())

Author: Ajaycs99, Project: tensorflow, Lines: 7, Source: chi2_test.py
Example 10: _cdf

def _cdf(self, y):
  lower_cutoff = self._lower_cutoff
  upper_cutoff = self._upper_cutoff
  # Recall the promise:
  # cdf(y) := P[Y <= y]
  #         = 1, if y >= upper_cutoff,
  #         = 0, if y < lower_cutoff,
  #         = P[X <= y], otherwise.
  # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
  # between.
  j = math_ops.floor(y)
  # P[X <= j], used when lower_cutoff < X < upper_cutoff.
  result_so_far = self.distribution.cdf(j)
  # Broadcast, because it's possible that this is a single distribution being
  # evaluated on a number of samples, or something like that.
  j += array_ops.zeros_like(result_so_far)
  # Re-define values at the cutoffs.
  if lower_cutoff is not None:
    result_so_far = math_ops.select(j < lower_cutoff,
                                    array_ops.zeros_like(result_so_far),
                                    result_so_far)
  if upper_cutoff is not None:
    result_so_far = math_ops.select(j >= upper_cutoff,
                                    array_ops.ones_like(result_so_far),
                                    result_so_far)
  return result_so_far

Author: Qstar, Project: tensorflow, Lines: 32, Source: quantized_distribution.py
Example 11: inverse_time_decay

def inverse_time_decay(learning_rate, global_step, decay_steps, decay_rate,
                       staircase=False, name=None):
  """Applies inverse time decay to the initial learning rate.

  When training a model, it is often recommended to lower the learning rate as
  the training progresses. This function applies an inverse decay function
  to a provided initial learning rate. It requires a `global_step` value to
  compute the decayed learning rate. You can just pass a TensorFlow variable
  that you increment at each training step.

  The function returns the decayed learning rate. It is computed as:

  ```python
  decayed_learning_rate = learning_rate / (1 + decay_rate * t)
  ```

  Example: decay 1/t with a rate of 0.5:

  ```python
  ...
  global_step = tf.Variable(0, trainable=False)
  learning_rate = 0.1
  k = 0.5
  learning_rate = tf.train.inverse_time_decay(learning_rate, global_step, k)
  # Passing global_step to minimize() will increment it at each step.
  learning_step = (
      tf.train.GradientDescentOptimizer(learning_rate)
      .minimize(...my loss..., global_step=global_step)
  )
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a
      Python number. The initial learning rate.
    global_step: A Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: A Python number.
      How often to apply decay; see the decay computation above.
    decay_rate: A Python number. The decay rate.
    staircase: Boolean. If `True`, decay the learning rate at discrete
      intervals.
    name: String. Optional name of the operation. Defaults to
      'InverseTimeDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate`. The decayed
    learning rate.
  """
  with ops.name_scope(name, "InverseTimeDecay",
                      [learning_rate, global_step, decay_rate]) as name:
    learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    dtype = learning_rate.dtype
    global_step = math_ops.cast(global_step, dtype)
    decay_steps = math_ops.cast(decay_steps, dtype)
    decay_rate = math_ops.cast(decay_rate, dtype)
    p = global_step / decay_steps
    if staircase:
      p = math_ops.floor(p)
    const = math_ops.cast(constant_op.constant(1), learning_rate.dtype)
    denom = math_ops.add(const, math_ops.mul(decay_rate, p))
    return math_ops.div(learning_rate, denom, name=name)

Author: AriaAsuka, Project: tensorflow, Lines: 59, Source: learning_rate_decay.py
Example 12: dropout

def dropout(self, input_, keep_prob):
  with ops.op_scope([input_], None, "dropout") as name:
    rands = keep_prob + random_ops.random_uniform(
        array_ops.shape(input_))
    # floor() turns the shifted uniform noise into a 0/1 keep mask; the
    # surviving activations are scaled by 1/keep_prob to preserve the mean.
    floored = math_ops.floor(rands)
    ret = input_ * math_ops.inv(keep_prob) * floored
    ret.set_shape(input_.get_shape())
    return ret

Author: amharc, Project: jnp3, Lines: 8, Source: Model.py
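The floor trick used here (and in Example 4) works because `keep_prob + U` with `U ~ Uniform[0, 1)` lies in `[keep_prob, 1 + keep_prob)`: its floor is 1 exactly when `U >= 1 - keep_prob`, which happens with probability `keep_prob`. A NumPy sketch of the same mask construction (assumed, outside TensorFlow):

```python
import numpy as np

rng = np.random.default_rng(0)
keep_prob = 0.8
rands = keep_prob + rng.random(100_000)  # values in [0.8, 1.8)
mask = np.floor(rands)                   # 0.0 or 1.0, nothing else
print(mask.mean())                       # ~0.8: each unit kept w.p. keep_prob
```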
Example 13: _cdf

def _cdf(self, positive_counts):
  if self.validate_args:
    positive_counts = math_ops.floor(
        distribution_util.embed_check_nonnegative_discrete(
            positive_counts, check_integer=False))
  return math_ops.betainc(
      self.total_count, positive_counts + 1.,
      math_ops.sigmoid(-self.logits))

Author: arnonhongklay, Project: tensorflow, Lines: 8, Source: negative_binomial.py
Example 14: decayed_lr

def decayed_lr():
  """Helper to recompute learning rate; most helpful in eager-mode."""
  global_step_recomp = math_ops.cast(global_step, dtype)
  p = global_step_recomp / decay_steps
  if staircase:
    p = math_ops.floor(p)
  return math_ops.multiply(
      learning_rate, math_ops.pow(decay_rate, p), name=name)

Author: ZhangXinNan, Project: tensorflow, Lines: 8, Source: learning_rate_decay.py
Example 15: __init__

def __init__(self, df, validate_args=False, allow_nan_stats=True,
             name="Chi2WithAbsDf"):
  with ops.name_scope(name, values=[df]) as ns:
    super(Chi2WithAbsDf, self).__init__(
        df=math_ops.floor(math_ops.abs(df)),
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        name=ns)

Author: KalraA, Project: tensorflow, Lines: 8, Source: chi2.py
Example 16: _cdf

def _cdf(self, x):
  if self.validate_args:
    x = distribution_util.embed_check_nonnegative_integer_form(x)
  else:
    # Whether or not x is integer-form, the following is well-defined.
    # However, scipy takes the floor, so we do too.
    x = math_ops.floor(x)
  return math_ops.igammac(1. + x, self.rate)

Author: AbhinavJain13, Project: tensorflow, Lines: 8, Source: poisson.py
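`igammac(a, x)` is the regularized upper incomplete gamma function `Q(a, x)`, and the Poisson CDF satisfies `P[X <= k] = Q(k + 1, rate)`; that identity is why the snippet passes `floor(x) + 1` as the first argument. A quick SciPy check of the identity (SciPy's `gammaincc` computes the same `Q`; values assumed):

```python
import numpy as np
from scipy import special, stats

rate, x = 4.0, 6.3  # assumed values
k = np.floor(x)
assert np.isclose(special.gammaincc(k + 1.0, rate),
                  stats.poisson.cdf(k, rate))
```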
Example 17: natural_exp_decay

def natural_exp_decay(learning_rate, global_step, decay_steps, decay_rate,
                      staircase=False, name=None):
  """Applies natural exponential decay to the initial learning rate.

  When training a model, it is often recommended to lower the learning rate as
  the training progresses. This function applies an exponential decay function
  to a provided initial learning rate. It requires a `global_step` value to
  compute the decayed learning rate. You can just pass a TensorFlow variable
  that you increment at each training step.

  The function returns the decayed learning rate. It is computed as:

  ```python
  decayed_learning_rate = learning_rate * exp(-decay_rate * global_step)
  ```

  Example: decay exponentially with a base of 0.96:

  ```python
  ...
  global_step = tf.Variable(0, trainable=False)
  learning_rate = 0.1
  k = 0.5
  learning_rate = tf.train.natural_exp_decay(learning_rate, global_step, k)
  # Passing global_step to minimize() will increment it at each step.
  learning_step = (
      tf.train.GradientDescentOptimizer(learning_rate)
      .minimize(...my loss..., global_step=global_step)
  )
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a
      Python number. The initial learning rate.
    global_step: A Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: A Python number.
      How often to apply decay; see the decay computation above.
    decay_rate: A Python number. The decay rate.
    staircase: Boolean. If `True`, decay the learning rate at discrete
      intervals.
    name: String. Optional name of the operation. Defaults to
      'NaturalExpDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate`. The decayed
    learning rate.
  """
  with ops.op_scope([learning_rate, global_step, decay_rate],
                    name, "NaturalExpDecay") as name:
    learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    dtype = learning_rate.dtype
    global_step = math_ops.cast(global_step, dtype)
    decay_steps = math_ops.cast(decay_steps, dtype)
    decay_rate = math_ops.cast(decay_rate, dtype)
    p = global_step / decay_steps
    if staircase:
      p = math_ops.floor(p)
    exponent = math_ops.exp(math_ops.mul(math_ops.neg(decay_rate), p))
    return math_ops.mul(learning_rate, exponent, name=name)

Author: 10imaging, Project: tensorflow, Lines: 57, Source: learning_rate_decay.py
Example 18: compute_cdf

def compute_cdf(values, value_range, **kwargs):
  """Returns the normalized cumulative distribution of the given values tensor.

  Uses tf.while_loop to directly compute the cdf of the values. The number of
  histogram bins is fixed at _NBINS=255.

  Args:
    values: Numeric `Tensor`.
    value_range: Shape [2] `Tensor` of same `dtype` as `values`.
    **kwargs: keyword arguments: name

  Returns:
    A 1-D `Tensor` holding the normalized cdf of values.
  """
  nbins = _NBINS
  name = kwargs.get('name', None)
  with ops.name_scope(name, 'cdf', [values, value_range, nbins]):
    values = ops.convert_to_tensor(values, name='values')
    value_range = ops.convert_to_tensor(value_range, name='value_range')
    nbins_float = np.float32(nbins)

    # Map tensor values that fall within value_range to [0, 1].
    scaled_values = math_ops.truediv(
        values - value_range[0],
        value_range[1] - value_range[0],
        name='scaled_values')

    # Map tensor values within the open interval value_range to {0,.., nbins-1};
    # values outside the open interval will be zero or less, or nbins or more.
    indices = math_ops.floor(nbins_float * scaled_values, name='indices')

    # Clip edge cases (e.g. value = value_range[1]) or "outliers."
    indices = math_ops.cast(
        clip_ops.clip_by_value(indices, 0, nbins_float - 1), dtypes.int32)

    cdf = array_ops.zeros(nbins)
    i = constant_op.constant(0)

    def loop_cond(loop_count, _):
      return math_ops.less(loop_count, nbins)

    def loop_body(loop_count, cdf):
      # Count how many values fall into bins <= loop_count, then write that
      # count into position loop_count of the running cdf.
      temp = math_ops.reduce_sum(
          math_ops.cast(
              math_ops.less_equal(indices, loop_count), dtypes.float32))
      cdf = math_ops.add(
          cdf,
          array_ops.one_hot(
              loop_count, depth=_NBINS, on_value=temp, off_value=0.0))
      return [loop_count + 1, cdf]

    _, cdf = control_flow_ops.while_loop(
        loop_cond, loop_body, [i, cdf], maximum_iterations=nbins)

    # Normalize so the final entry is 1.
    return math_ops.div(cdf, math_ops.reduce_max(cdf))

Author: Jackiefan, Project: tensorflow, Lines: 56, Source: pruning_utils.py
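A small usage sketch for the snippet above (inputs assumed, using TF 2.x tensor constructors; `_NBINS` is a module-level constant in pruning_utils.py, so the result always has 255 entries and, after the final division, ends at 1.0):

```python
import tensorflow as tf

values = tf.random.uniform([1000])      # assumed input tensor
value_range = tf.constant([0.0, 1.0])
cdf = compute_cdf(values, value_range, name='weights_cdf')
# cdf: 1-D tensor of length 255, nondecreasing, with cdf[-1] == 1.0
```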
Example 19: __init__

def __init__(self, df, mu, sigma, validate_args=False, allow_nan_stats=True,
             name="StudentTWithAbsDfSoftplusSigma"):
  with ops.name_scope(name, values=[df, mu, sigma]) as ns:
    super(StudentTWithAbsDfSoftplusSigma, self).__init__(
        df=math_ops.floor(math_ops.abs(df)),
        mu=mu,
        sigma=nn.softplus(sigma),
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        name=ns)

Author: apollos, Project: tensorflow, Lines: 10, Source: student_t.py
Example 20: _compare

def _compare(self, x):
  np_floor, np_ceil = np.floor(x), np.ceil(x)
  with self.cached_session() as sess:
    inx = ops.convert_to_tensor(x)
    ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)
    tf_floor, tf_ceil = sess.run([ofloor, oceil])
  self.assertAllEqual(np_floor, tf_floor)
  self.assertAllEqual(np_ceil, tf_ceil)
  self.assertShapeEqual(np_floor, ofloor)
  self.assertShapeEqual(np_ceil, oceil)

Author: bunbutter, Project: tensorflow, Lines: 10, Source: cwise_ops_test.py
Note: the `tensorflow.python.ops.math_ops.floor` examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub/MSDocs. The snippets are selected from open-source projects; copyright remains with the original authors, and redistribution or use should follow each project's License. Do not reproduce without permission.