This article collects typical usage examples of the Python asfloat function from neupy.utils. If you are unsure what asfloat does or how to use it, the curated code examples below should help.
The following sections show 20 code examples of the asfloat function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
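Before the examples, here is a minimal sketch of what asfloat typically does: it casts Python numbers and NumPy arrays to the float type Theano is configured with (theano.config.floatX, usually float32). The snippet below illustrates the expected behaviour and is not taken from the neupy sources.

import numpy as np
from neupy.utils import asfloat

# Cast a scalar and a float64 array to the configured Theano float type.
scalar = asfloat(0.5)
matrix = asfloat(np.random.random((3, 3)))

# The dtype is expected to match theano.config.floatX (often float32).
print(matrix.dtype)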
Example 1: quadratic_minimizer

def quadratic_minimizer(x_a, y_a, y_prime_a, x_b, y_b, bound_size_ratio=0.1):
    """
    Finds the minimizer for a quadratic polynomial that
    goes through the points (x_a, y_a), (x_b, y_b) with derivative
    at x_a of y_prime_a.

    Parameters
    ----------
    x_a : float or theano variable
        Left point ``a`` in the ``x`` axis.
    y_a : float or theano variable
        Output from function ``y`` at point ``a``.
    y_prime_a : float or theano variable
        Output from function ``y'`` (``y`` derivative) at
        point ``a``.
    x_b : float or theano variable
        Right point ``b`` in the ``x`` axis.
    y_b : float or theano variable
        Output from function ``y`` at point ``b``.
    bound_size_ratio : float
        Controls the acceptable bounds for interpolation. If the
        interpolated point is too close to one of the input points,
        the interpolation result will be ignored. The bigger the
        ratio, the more likely the interpolation is rejected.
        Value needs to be between ``0`` and ``1``. Defaults to ``0.1``.

    Returns
    -------
    object
        Theano variable that after evaluation is equal to
        the point ``x`` which is the minimizer of the quadratic function.
    """
    if not 0 <= bound_size_ratio < 1:
        raise ValueError("Value ``bound_size_ratio`` needs to be a float "
                         "between 0 and 1, got {}".format(bound_size_ratio))

    # The main formula works for the region [0, a], so we need to
    # shift the function to the left side and put point ``a``
    # at the ``0`` position.
    x_range = x_b - x_a
    coef = (y_b - y_a - y_prime_a * x_range) / (x_range ** 2)
    minimizer = -y_prime_a / (asfloat(2) * coef) + x_a

    bound_size_ratio = asfloat(bound_size_ratio)
    return T.switch(
        sequential_or(
            # Handle bad cases
            T.eq(x_range, zero),
            coef <= zero,
            T.gt(minimizer, x_b - bound_size_ratio * x_range),
            T.lt(minimizer, x_a + bound_size_ratio * x_range),
        ),
        x_a + asfloat(0.5) * x_range,
        # Since we shifted the function to the left, we need to shift
        # the result to the right to make it correct for
        # the specified region. That's why we add ``x_a``
        # at the end.
        -y_prime_a / (asfloat(2) * coef) + x_a
    )

Author: mayblue9 | Project: neupy | Lines: 60 | Source: wolfe.py
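For reference, the closed-form expression used above can be derived as follows: after shifting the interval so that point a sits at the origin, the quadratic q(t) = y_a + y'_a t + c t^2 that matches y_b at t = x_b - x_a has

c = \frac{y_b - y_a - y'_a (x_b - x_a)}{(x_b - x_a)^2}, \qquad x^{*} = x_a - \frac{y'_a}{2c}.

The T.switch falls back to the interval midpoint x_a + 0.5 * x_range whenever the interval is degenerate, coef is non-positive (no minimum exists), or the interpolated point lands within bound_size_ratio of either endpoint.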
Example 2: init_param_updates

def init_param_updates(self, layer, parameter):
    step = self.variables.step
    epsilon = self.epsilon
    parameter_shape = parameter.get_value().shape

    prev_mean_squred_grad = theano.shared(
        name="{}/prev-mean-squred-grad".format(parameter.name),
        value=asfloat(np.zeros(parameter_shape)),
    )
    prev_mean_squred_dx = theano.shared(
        name="{}/prev-mean-squred-dx".format(parameter.name),
        value=asfloat(np.zeros(parameter_shape)),
    )

    gradient = T.grad(self.variables.error_func, wrt=parameter)

    mean_squred_grad = (
        self.decay * prev_mean_squred_grad +
        (1 - self.decay) * gradient ** 2
    )
    parameter_delta = gradient * (
        T.sqrt(prev_mean_squred_dx + epsilon) /
        T.sqrt(mean_squred_grad + epsilon)
    )
    mean_squred_dx = (
        self.decay * prev_mean_squred_dx +
        (1 - self.decay) * parameter_delta ** 2
    )

    return [
        (prev_mean_squred_grad, mean_squred_grad),
        (prev_mean_squred_dx, mean_squred_dx),
        (parameter, parameter - step * parameter_delta),
    ]

Author: itdxer | Project: neupy | Lines: 34 | Source: adadelta.py
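Written out as equations, the update implemented above is the standard Adadelta rule, with self.decay as \rho and step as \alpha:

E[g^2]_t = \rho\, E[g^2]_{t-1} + (1 - \rho)\, g_t^2
\Delta\theta_t = g_t \,\frac{\sqrt{E[\Delta\theta^2]_{t-1} + \epsilon}}{\sqrt{E[g^2]_t + \epsilon}}
E[\Delta\theta^2]_t = \rho\, E[\Delta\theta^2]_{t-1} + (1 - \rho)\, \Delta\theta_t^2
\theta_{t+1} = \theta_t - \alpha\, \Delta\theta_t

Here asfloat(np.zeros(...)) is only used to make sure both accumulators start as zero arrays of the configured float type.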
Example 3: test_upscale_layer

def test_upscale_layer(self):
    input_value = np.array([
        [1, 2, 3, 4],
        [5, 6, 7, 8],
    ]).reshape((1, 1, 2, 4))
    expected_output = np.array([
        [1, 1, 2, 2, 3, 3, 4, 4],
        [1, 1, 2, 2, 3, 3, 4, 4],
        [1, 1, 2, 2, 3, 3, 4, 4],
        [5, 5, 6, 6, 7, 7, 8, 8],
        [5, 5, 6, 6, 7, 7, 8, 8],
        [5, 5, 6, 6, 7, 7, 8, 8],
    ]).reshape((1, 1, 6, 8))

    upscale_layer = layers.Upscale((3, 2))
    connection = layers.Input((1, 2, 4)) > upscale_layer

    x = T.tensor4('x')
    actual_output = upscale_layer.output(x)
    actual_output = actual_output.eval({x: asfloat(input_value)})

    np.testing.assert_array_almost_equal(
        asfloat(expected_output),
        actual_output
    )

Author: InSertCod3 | Project: neupy | Lines: 25 | Source: test_conv_layers.py
Example 4: init_param_updates

def init_param_updates(self, layer, parameter):
    step = self.variables.step
    parameter_shape = T.shape(parameter).eval()

    prev_delta = theano.shared(
        name="{}/prev-delta".format(parameter.name),
        value=asfloat(np.zeros(parameter_shape)),
    )
    prev_gradient = theano.shared(
        name="{}/prev-grad".format(parameter.name),
        value=asfloat(np.zeros(parameter_shape)),
    )

    gradient = T.grad(self.variables.error_func, wrt=parameter)
    grad_delta = T.abs_(prev_gradient - gradient)

    parameter_delta = ifelse(
        T.eq(self.variables.epoch, 1),
        gradient,
        T.clip(
            T.abs_(prev_delta) * gradient / grad_delta,
            -self.upper_bound,
            self.upper_bound
        )
    )

    return [
        (parameter, parameter - step * parameter_delta),
        (prev_gradient, gradient),
        (prev_delta, parameter_delta),
    ]

Author: itdxer | Project: neupy | Lines: 30 | Source: quickprop.py
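In equation form: on the first epoch the delta is simply the gradient g_1; afterwards the Quickprop step is

\Delta\theta_t = \operatorname{clip}\!\left(\frac{|\Delta\theta_{t-1}|\, g_t}{|g_{t-1} - g_t|},\; -B,\; B\right), \qquad \theta_{t+1} = \theta_t - \alpha\, \Delta\theta_t

where B is self.upper_bound and \alpha is the step. The two asfloat(np.zeros(...)) calls initialize the previous-delta and previous-gradient buffers in Theano's float type.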
Example 5: test_mixture_of_experts

def test_mixture_of_experts(self):
    dataset = datasets.load_diabetes()
    data, target = asfloat(dataset.data), asfloat(dataset.target)
    insize, outsize = data.shape[1], 1

    input_scaler = preprocessing.MinMaxScaler((-1, 1))
    output_scaler = preprocessing.MinMaxScaler()

    x_train, x_test, y_train, y_test = cross_validation.train_test_split(
        input_scaler.fit_transform(data),
        output_scaler.fit_transform(target.reshape(-1, 1)),
        train_size=0.8
    )

    n_epochs = 10
    scaled_y_test = output_scaler.inverse_transform(y_test)
    scaled_y_test = scaled_y_test.reshape((y_test.size, 1))

    # -------------- Train single GradientDescent -------------- #

    bpnet = algorithms.GradientDescent(
        (insize, 20, outsize),
        step=0.1,
        verbose=False
    )
    bpnet.train(x_train, y_train, epochs=n_epochs)
    network_output = bpnet.predict(x_test)
    network_error = rmsle(output_scaler.inverse_transform(network_output),
                          scaled_y_test)

    # -------------- Train ensemble -------------- #

    moe = algorithms.MixtureOfExperts(
        networks=[
            algorithms.Momentum(
                (insize, 20, outsize),
                step=0.1,
                batch_size=1,
                verbose=False
            ),
            algorithms.Momentum(
                (insize, 20, outsize),
                step=0.1,
                batch_size=1,
                verbose=False
            ),
        ],
        gating_network=algorithms.Momentum(
            layers.Softmax(insize) > layers.Output(2),
            step=0.1,
            verbose=False
        )
    )
    moe.train(x_train, y_train, epochs=n_epochs)
    ensemble_output = moe.predict(x_test)
    ensemble_error = rmsle(
        output_scaler.inverse_transform(ensemble_output),
        scaled_y_test
    )
    self.assertGreater(network_error, ensemble_error)

Author: EdwardBetts | Project: neupy | Lines: 60 | Source: test_mixtures_of_experts.py
Example 6: init_param_updates

def init_param_updates(self, layer, parameter):
    epoch = self.variables.epoch
    step = self.variables.step
    beta1 = self.beta1
    beta2 = self.beta2

    parameter_shape = T.shape(parameter).eval()
    prev_first_moment = theano.shared(
        name="{}/prev-first-moment".format(parameter.name),
        value=asfloat(np.zeros(parameter_shape)),
    )
    prev_weighted_inf_norm = theano.shared(
        name="{}/prev-weighted-inf-norm".format(parameter.name),
        value=asfloat(np.zeros(parameter_shape)),
    )

    gradient = T.grad(self.variables.error_func, wrt=parameter)

    first_moment = beta1 * prev_first_moment + (1 - beta1) * gradient
    weighted_inf_norm = T.maximum(beta2 * prev_weighted_inf_norm,
                                  T.abs_(gradient))

    parameter_delta = (
        (1 / (1 - beta1 ** epoch)) *
        (first_moment / (weighted_inf_norm + self.epsilon))
    )

    return [
        (prev_first_moment, first_moment),
        (prev_weighted_inf_norm, weighted_inf_norm),
        (parameter, parameter - step * parameter_delta),
    ]

Author: itdxer | Project: neupy | Lines: 32 | Source: adamax.py
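The code above corresponds to the Adamax update:

m_t = \beta_1 m_{t-1} + (1 - \beta_1)\, g_t
u_t = \max(\beta_2 u_{t-1},\; |g_t|)
\theta_{t+1} = \theta_t - \frac{\alpha}{1 - \beta_1^{t}} \cdot \frac{m_t}{u_t + \epsilon}

where t is the epoch counter, m_t the first moment, and u_t the exponentially weighted infinity norm; both state buffers are created through asfloat so they match Theano's float type.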
Example 7: init_variables

def init_variables(self):
    super(ConjugateGradient, self).init_variables()
    n_parameters = count_parameters(self.connection)
    self.variables.update(
        prev_delta=theano.shared(
            name="conj-grad/prev-delta",
            value=asfloat(np.zeros(n_parameters))),
        prev_gradient=theano.shared(
            name="conj-grad/prev-gradient",
            value=asfloat(np.zeros(n_parameters))),
    )

Author: itdxer | Project: neupy | Lines: 8 | Source: conjgrad.py
Example 8: test_batch_norm_as_shared_variable

def test_batch_norm_as_shared_variable(self):
    gamma = theano.shared(value=asfloat(np.ones(2)))
    beta = theano.shared(value=asfloat(2 * np.ones(2)))

    batch_norm = layers.BatchNorm(gamma=gamma, beta=beta)
    layers.Input(10) > batch_norm

    self.assertIs(gamma, batch_norm.gamma)
    self.assertIs(beta, batch_norm.beta)

Author: itdxer | Project: neupy | Lines: 9 | Source: test_normalization_layers.py
Example 9: test_concatenate_basic

def test_concatenate_basic(self):
    concat_layer = layers.Concatenate(axis=1)

    x1 = T.tensor4()
    x2 = T.tensor4()
    y = theano.function([x1, x2], concat_layer.output(x1, x2))

    x1_tensor4 = asfloat(np.random.random((1, 2, 3, 4)))
    x2_tensor4 = asfloat(np.random.random((1, 8, 3, 4)))
    output = y(x1_tensor4, x2_tensor4)
    self.assertEqual((1, 10, 3, 4), output.shape)

Author: itdxer | Project: neupy | Lines: 12 | Source: test_merge_layers.py
Example 10: init_layers

def init_layers(self):
    super(Quickprop, self).init_layers()
    for layer in self.layers:
        for parameter in layer.parameters:
            parameter_shape = T.shape(parameter).eval()
            parameter.prev_delta = theano.shared(
                name="prev_delta_" + parameter.name,
                value=asfloat(np.zeros(parameter_shape)),
            )
            parameter.prev_gradient = theano.shared(
                name="prev_grad_" + parameter.name,
                value=asfloat(np.zeros(parameter_shape)),
            )

Author: InSertCod3 | Project: neupy | Lines: 13 | Source: quickprop.py
Example 11: init_layers

def init_layers(self):
    super(Adadelta, self).init_layers()
    for layer in self.layers:
        for parameter in layer.parameters:
            parameter_shape = T.shape(parameter).eval()
            parameter.prev_mean_squred_grad = theano.shared(
                name="prev_mean_squred_grad_" + parameter.name,
                value=asfloat(np.zeros(parameter_shape)),
            )
            parameter.prev_mean_squred_dx = theano.shared(
                name="prev_mean_squred_dx_" + parameter.name,
                value=asfloat(np.zeros(parameter_shape)),
            )

Author: InSertCod3 | Project: neupy | Lines: 13 | Source: adadelta.py
Example 12: init_layers

def init_layers(self):
    super(Adamax, self).init_layers()
    for layer in self.layers:
        for parameter in layer.parameters:
            parameter_shape = T.shape(parameter).eval()
            parameter.prev_first_moment = theano.shared(
                name="prev_first_moment_" + parameter.name,
                value=asfloat(np.zeros(parameter_shape)),
            )
            parameter.prev_weighted_inf_norm = theano.shared(
                name="prev_weighted_inf_norm_" + parameter.name,
                value=asfloat(np.zeros(parameter_shape)),
            )

Author: EdwardBetts | Project: neupy | Lines: 13 | Source: adamax.py
Example 13: test_elementwise_basic

def test_elementwise_basic(self):
    elem_layer = layers.Elementwise(merge_function=T.add)

    x1 = T.matrix()
    x2 = T.matrix()
    y = theano.function([x1, x2], elem_layer.output(x1, x2))

    x1_matrix = asfloat(np.random.random((10, 2)))
    x2_matrix = asfloat(np.random.random((10, 2)))

    expected_output = x1_matrix + x2_matrix
    actual_output = y(x1_matrix, x2_matrix)
    np.testing.assert_array_almost_equal(expected_output, actual_output)

Author: itdxer | Project: neupy | Lines: 13 | Source: test_merge_layers.py
Example 14: test_jacobian_for_levenberg_marquardt

def test_jacobian_for_levenberg_marquardt(self):
    w1 = theano.shared(name='w1', value=asfloat(np.array([[1]])))
    b1 = theano.shared(name='b1', value=asfloat(np.array([0])))
    w2 = theano.shared(name='w2', value=asfloat(np.array([[2]])))
    b2 = theano.shared(name='b2', value=asfloat(np.array([1])))

    x = T.matrix('x')
    y = T.matrix('y')

    output = ((x.dot(w1.T) + b1) ** 2).dot(w2.T) + b2
    error_func = T.mean((y - output), axis=1)

    x_train = asfloat(np.array([[1, 2, 3]]).T)
    y_train = asfloat(np.array([[1, 2, 3]]).T)
    output_expected = asfloat(np.array([[3, 9, 19]]).T)

    np.testing.assert_array_almost_equal(
        output.eval({x: x_train}),
        output_expected
    )

    jacobian_expected = asfloat(np.array([
        [-4, -4, -1, -1],
        [-16, -8, -4, -1],
        [-36, -12, -9, -1],
    ]))
    jacobian_actual = compute_jacobian(error_func, [w1, b1, w2, b2])
    np.testing.assert_array_almost_equal(
        jacobian_expected,
        jacobian_actual.eval({x: x_train, y: y_train})
    )

Author: itdxer | Project: neupy | Lines: 30 | Source: test_levenberg_marquardt.py
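The expected Jacobian in this test can be checked by hand. With w_1 = 1, b_1 = 0, w_2 = 2, b_2 = 1 the model output is w_2 (w_1 x + b_1)^2 + b_2 = 2x^2 + 1 (hence 3, 9, 19 for x = 1, 2, 3), and the per-sample error e = y - \hat{y} has partial derivatives

\frac{\partial e}{\partial w_1} = -2 w_2 x (w_1 x + b_1) = -4x^2, \quad
\frac{\partial e}{\partial b_1} = -2 w_2 (w_1 x + b_1) = -4x, \quad
\frac{\partial e}{\partial w_2} = -(w_1 x + b_1)^2 = -x^2, \quad
\frac{\partial e}{\partial b_2} = -1.

Evaluating at x = 1, 2, 3 reproduces the three rows of jacobian_expected.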
Example 15: test_categorical_hinge_without_one_hot_encoding

def test_categorical_hinge_without_one_hot_encoding(self):
    targets = asfloat(np.array([2, 0]))
    predictions = asfloat(np.array([
        [0.1, 0.2, 0.7],
        [0.0, 0.9, 0.1],
    ]))
    expected = asfloat(np.array([0.5, 1.9]).mean())

    prediction_var = T.matrix()
    target_var = T.vector()
    error_output = errors.categorical_hinge(target_var, prediction_var)
    actual = error_output.eval({prediction_var: predictions,
                                target_var: targets})
    self.assertAlmostEqual(expected, actual)

Author: itdxer | Project: neupy | Lines: 15 | Source: test_errors.py
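The hard-coded expected values follow from the usual categorical hinge definition \max(0,\; 1 - s_{\text{correct}} + \max_{j \ne \text{correct}} s_j): for target 2 and prediction [0.1, 0.2, 0.7] this gives 1 - 0.7 + 0.2 = 0.5, and for target 0 and prediction [0.0, 0.9, 0.1] it gives 1 - 0.0 + 0.9 = 1.9, so the mean error is 1.2.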
Example 16: initialize

def initialize(self):
    super(BatchNorm, self).initialize()

    input_shape = as_tuple(None, self.input_shape)
    ndim = len(input_shape)

    if self.axes is None:
        # If ndim == 4 then axes = (0, 2, 3)
        # If ndim == 2 then axes = (0,)
        self.axes = tuple(axis for axis in range(ndim) if axis != 1)

    if any(axis >= ndim for axis in self.axes):
        raise ValueError("Cannot apply batch normalization on the axis "
                         "that doesn't exist.")

    opposite_axes = find_opposite_axes(self.axes, ndim)
    parameter_shape = [input_shape[axis] for axis in opposite_axes]

    if any(parameter is None for parameter in parameter_shape):
        unknown_dim_index = parameter_shape.index(None)
        raise ValueError("Cannot apply batch normalization on the axis "
                         "with unknown size over the dimension #{} "
                         "(0-based indices).".format(unknown_dim_index))

    self.running_mean = theano.shared(
        name='running_mean_{}'.format(self.layer_id),
        value=asfloat(np.zeros(parameter_shape))
    )
    self.running_inv_std = theano.shared(
        name='running_inv_std_{}'.format(self.layer_id),
        value=asfloat(np.ones(parameter_shape))
    )

    if isinstance(self.gamma, number_type):
        self.gamma = np.ones(parameter_shape) * self.gamma

    if isinstance(self.beta, number_type):
        self.beta = np.ones(parameter_shape) * self.beta

    self.gamma = theano.shared(
        name='gamma_{}'.format(self.layer_id),
        value=asfloat(self.gamma),
    )
    self.beta = theano.shared(
        name='beta_{}'.format(self.layer_id),
        value=asfloat(self.beta),
    )

    self.parameters = [self.gamma, self.beta]

Author: mayblue9 | Project: neupy | Lines: 48 | Source: normalization.py
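For context (this formula is not part of the snippet above): with the variables created here, the batch-normalization output is typically computed as y = \gamma\,(x - \mu)\,\sigma^{-1} + \beta, where \mu and \sigma^{-1} come either from the current batch or, at prediction time, from running_mean and running_inv_std, and \gamma, \beta are the learnable scale and shift of shape parameter_shape.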
Example 17: golden_search

def golden_search(f, maxstep=50, maxiter=1024, tol=1e-5):
    """
    Identifies the best step size for a function in a specific direction.

    Parameters
    ----------
    f : func
    maxstep : float
        Defaults to ``50``.
    maxiter : int
        Defaults to ``1024``.
    tol : float
        Defaults to ``1e-5``.

    Returns
    -------
    float
        Identified optimal step.
    """
    golden_ratio = asfloat((math.sqrt(5) - 1) / 2)

    def interval_reduction(a, b, c, d, tol):
        fc = f(c)
        fd = f(d)

        a, b, c, d = ifelse(
            T.lt(fc, fd),
            [a, d, d - golden_ratio * (d - a), c],
            [c, b, d, c + golden_ratio * (b - c)]
        )
        stoprule = theano.scan_module.until(
            T.lt(T.abs_(c - d), tol)
        )
        return [a, b, c, d], stoprule

    a = T.constant(asfloat(0))
    b = maxstep
    c = b - golden_ratio * (b - a)
    d = a + golden_ratio * (b - a)

    (a, b, c, d), _ = theano.scan(
        interval_reduction,
        outputs_info=[a, b, c, d],
        non_sequences=[asfloat(tol)],
        n_steps=maxiter
    )

    return (a[-1] + b[-1]) / 2

Author: InSertCod3 | Project: neupy | Lines: 48 | Source: golden_search.py
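Here golden_ratio is (\sqrt{5} - 1)/2 \approx 0.618. Each interval_reduction step keeps one of the two interior points and shrinks the bracket [a, b] by that factor, so after k iterations the bracket length is roughly maxstep \cdot 0.618^{k}; the scan stops early once |c - d| < tol, and the returned step is the midpoint (a + b)/2.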
Example 18: create_shared_parameter

def create_shared_parameter(value, name, shape):
    """
    Creates an NN parameter as a Theano shared variable.

    Parameters
    ----------
    value : array-like, Theano variable, scalar or Initializer
        Default value for the parameter.
    name : str
        Shared variable name.
    shape : tuple
        Parameter's shape.

    Returns
    -------
    Theano shared variable.
    """
    if isinstance(value, (T.sharedvar.SharedVariable, T.Variable)):
        return value

    if isinstance(value, init.Initializer):
        value = value.sample(shape)

    return theano.shared(value=asfloat(value), name=name, borrow=True)

Author: itdxer | Project: neupy | Lines: 26 | Source: base.py
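A hypothetical usage sketch for this helper (the shapes and names below are invented for illustration, and init.Normal is assumed to be one of the initializers in neupy.init):

import numpy as np
from neupy import init

# A plain NumPy array is cast with asfloat and wrapped into a
# Theano shared variable with the given name.
weight = create_shared_parameter(
    value=np.zeros((10, 5)),   # hypothetical shape
    name='layer-1/weight',     # hypothetical name
    shape=(10, 5),
)

# An Initializer instance is sampled first, then wrapped the same way.
bias = create_shared_parameter(
    value=init.Normal(),       # assumed neupy initializer
    name='layer-1/bias',
    shape=(5,),
)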
Example 19: init_variables

def init_variables(self):
    super(LeakStepAdaptation, self).init_variables()
    n_parameters = count_parameters(self.connection)
    self.variables.leak_average = theano.shared(
        name='leak-step-adapt/leak-average',
        value=asfloat(np.zeros(n_parameters)),
    )

Author: itdxer | Project: neupy | Lines: 7 | Source: leak_step.py
Example 20: create_shared_parameter

def create_shared_parameter(value, name, shape, init_method, bounds):
    """
    Creates an NN parameter as a Theano shared variable.

    Parameters
    ----------
    value : array-like, theano shared variable or None
        Default value for the parameter. If the value is equal to
        ``None``, the parameter will be created based on the
        ``init_method`` value.
    name : str
        Shared variable name.
    shape : tuple
        Parameter shape.
    init_method : str
        Weight initialization procedure name.
    bounds : tuple
        Specific parameters for the chosen ``init_method``.

    Returns
    -------
    Theano shared variable.
    """
    if isinstance(value, T.sharedvar.TensorSharedVariable):
        return value

    if value is None:
        value = generate_weight(shape, bounds, init_method)

    return theano.shared(value=asfloat(value), name=name, borrow=True)

Author: EdwardBetts | Project: neupy | Lines: 29 | Source: base.py
Note: The neupy.utils.asfloat examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.