This article collects typical usage examples of the Python function theano.scalar.upcast. If you have been wondering what upcast does, how to call it, or want to see it used in real code, the curated examples below may help.
The section below presents 20 code examples of the upcast function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
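Before the examples, a minimal usage sketch may help orient readers. The calls and the expected results shown below are illustrative assumptions based on NumPy-style type promotion under Theano's default cast policy; they are not taken from any of the examples that follow.

# Minimal sketch (assumed behaviour, default cast policy):
# scalar.upcast takes one or more dtype names and returns the name of the
# dtype that can hold all of them without losing precision.
from theano import scalar

print(scalar.upcast('float32', 'float32'))  # 'float32' (nothing to promote)
print(scalar.upcast('int8', 'float32'))     # expected: 'float32'
print(scalar.upcast('int64', 'float32'))    # expected: 'float64'; float32 cannot
                                            # represent every int64 value exactly

Most of the examples below use this return value to decide whether a conversion is safe (e.g. up_dtype == self.dtype) or whether a TypeError should be raised instead.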
Example 1: filter_inplace
def filter_inplace(self, data, old_data, strict=False, allow_downcast=None):
    if strict or allow_downcast or isinstance(data, cuda.CudaNdarray):
        return cuda.filter(data, self.broadcastable, strict, old_data)
    else:  # (not strict) and (not allow_downcast)
        # Check if data.dtype can be accurately cast to self.dtype
        if isinstance(data, numpy.ndarray):
            up_dtype = scal.upcast(self.dtype, data.dtype)
            if up_dtype == self.dtype:
                return cuda.filter(data, self.broadcastable, strict, old_data)
            else:
                raise TypeError(
                    "%s, with dtype %s, cannot store a value of "
                    "dtype %s without risking loss of precision. "
                    "If you do not mind, please cast your data to %s."
                    % (self, self.dtype, data.dtype, self.dtype),
                    data,
                )
        else:
            converted_data = theano._asarray(data, self.dtype)
            if allow_downcast is None and type(data) is float and self.dtype == theano.config.floatX:
                return cuda.filter(converted_data, self.broadcastable, strict, old_data)
            elif numpy.all(data == converted_data):
                return cuda.filter(converted_data, self.broadcastable, strict, old_data)
            else:
                raise TypeError(
                    "%s, with dtype %s, cannot store accurately value %s, "
                    "it would be represented as %s. If you do not mind, "
                    "you can cast your data to %s."
                    % (self, self.dtype, data, converted_data, self.dtype),
                    data,
                )
Developer: repos-python, Project: Theano, Lines of code: 31, Source file: type.py
Example 2: local_gpua_advanced_incsubtensor
def local_gpua_advanced_incsubtensor(node, context_name):
    # This is disabled on non-cuda contexts
    if get_context(context_name).kind != "cuda":
        return None
    x, y, ilist = node.inputs
    # GPU ops need both inputs to have the same dtype
    if x.type.dtype != y.type.dtype:
        dtype = scalar.upcast(x.type.dtype, y.type.dtype)
        if x.type.dtype != dtype:
            x = tensor.cast(x, dtype)
        if y.type.dtype != dtype:
            y = tensor.cast(y, dtype)
    set_instead_of_inc = node.op.set_instead_of_inc
    active_device_no = theano.sandbox.cuda.active_device_number()
    device_properties = theano.sandbox.cuda.device_properties
    compute_capability = device_properties(active_device_no)["major"]
    if compute_capability < 2 or x.ndim != 2 or y.ndim != 2:
        return GpuAdvancedIncSubtensor1(set_instead_of_inc=set_instead_of_inc)
    else:
        return GpuAdvancedIncSubtensor1_dev20(set_instead_of_inc=set_instead_of_inc)
Developer: rollingstone, Project: Theano, Lines of code: 26, Source file: opt.py
Example 3: test_reduce_custom_acc_dtype
def test_reduce_custom_acc_dtype(self):
    # Test the ability to provide your own accumulator dtype for a reduce.
    # We try multiple axis combinations even though axis should not matter.
    idx = 0
    for method in self.methods:
        for input_dtype in self.dtypes:
            x = tensor.matrix(dtype=input_dtype)
            for acc_dtype in self.dtypes:
                # If the accumulator is complex, the gradient of the reduce will
                # cast the complex to the input dtype. We can't call the normal
                # cast on a complex to a non-complex dtype as this is ambiguous.
                if not input_dtype.startswith("complex") and acc_dtype.startswith("complex"):
                    continue
                axis = self.axes[idx % len(self.axes)]
                # If acc_dtype would force a downcast, we expect a TypeError.
                # We always allow int/uint inputs with float/complex outputs.
                upcasted_dtype = scalar.upcast(input_dtype, acc_dtype)
                if acc_dtype == upcasted_dtype or (
                        input_dtype in tensor.discrete_dtypes and
                        acc_dtype in tensor.continuous_dtypes):
                    var = getattr(x, method)(acc_dtype=acc_dtype, axis=axis)
                    assert var.owner.op.acc_dtype == acc_dtype
                    if "complex" in input_dtype:
                        continue
                    # Check that we can take the gradient
                    tensor.grad(var.sum(), x, disconnected_inputs="ignore")
                else:
                    self.assertRaises(TypeError, getattr(x, method),
                                      acc_dtype=acc_dtype, axis=axis)
                idx += 1
Developer: souravsingh, Project: Theano, Lines of code: 33, Source file: test_elemwise.py
Example 4: test_prod_without_zeros_custom_acc_dtype
def test_prod_without_zeros_custom_acc_dtype(self):
    """
    Test the ability to provide your own acc_dtype for a ProdWithoutZeros().
    """
    # We try multiple axis combinations even though axis should not matter.
    axes = [None, 0, 1, [], [0], [1], [0, 1]]
    idx = 0
    for input_dtype in imap(str, theano.scalar.all_types):
        x = tensor.matrix(dtype=input_dtype)
        for acc_dtype in imap(str, theano.scalar.all_types):
            axis = axes[idx % len(axes)]
            # If acc_dtype would force a downcast, we expect a TypeError.
            # We always allow int/uint inputs with float/complex outputs.
            upcasted_dtype = scalar.upcast(input_dtype, acc_dtype)
            if (acc_dtype == upcasted_dtype or
                    (input_dtype in tensor.discrete_dtypes and
                     acc_dtype in tensor.continuous_dtypes)):
                prod_woz_var = ProdWithoutZeros(
                    axis=axis, acc_dtype=acc_dtype)(x)
                assert prod_woz_var.owner.op.acc_dtype == acc_dtype
                if (acc_dtype.startswith('complex') and
                        input_dtype != acc_dtype):
                    continue
                f = theano.function([x], prod_woz_var)
                data = numpy.random.rand(2, 3) * 3
                data = data.astype(input_dtype)
                f(data)
            else:
                self.assertRaises(TypeError,
                                  ProdWithoutZeros(axis=axis, acc_dtype=acc_dtype),
                                  x)
            idx += 1
Developer: AI-Cdrone, Project: Theano, Lines of code: 35, Source file: test_elemwise.py
Example 5: make_node
def make_node(self, a, val, offset):
    a = tensor.as_tensor_variable(a)
    val = tensor.as_tensor_variable(val)
    offset = tensor.as_tensor_variable(offset)
    if a.ndim != 2:
        raise TypeError('%s: first parameter must have exactly'
                        ' two dimensions' % self.__class__.__name__)
    elif val.ndim != 0:
        raise TypeError('%s: second parameter must be a scalar'
                        % self.__class__.__name__)
    elif offset.ndim != 0:
        raise TypeError('%s: third parameter must be a scalar'
                        % self.__class__.__name__)
    val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
    if val.dtype != a.dtype:
        raise TypeError('%s: type of second parameter must be the same'
                        ' as the first\'s' % self.__class__.__name__)
    elif offset.dtype[:3] != 'int':
        raise TypeError('%s: type of third parameter must be an integer;'
                        ' use theano.tensor.cast(input, \'int32/int64\')'
                        % self.__class__.__name__)
    return gof.Apply(self, [a, val, offset], [a.type()])
Developer: MLevinson-OR, Project: Theano, Lines of code: 25, Source file: extra_ops.py
Example 6: local_gpua_advanced_incsubtensor
def local_gpua_advanced_incsubtensor(node, context_name):
    context = get_context(context_name)
    # This is disabled on non-cuda contexts
    if context.kind != 'cuda':
        return None
    x, y, ilist = node.inputs
    # GPU ops need both inputs to have the same dtype
    if x.type.dtype != y.type.dtype:
        dtype = scalar.upcast(x.type.dtype, y.type.dtype)
        if x.type.dtype != dtype:
            x = tensor.cast(x, dtype)
        if y.type.dtype != dtype:
            y = tensor.cast(y, dtype)
    set_instead_of_inc = node.op.set_instead_of_inc
    compute_capability = int(context.bin_id[-2])
    if compute_capability < 2 or x.ndim != 2 or y.ndim != 2:
        return GpuAdvancedIncSubtensor1(
            set_instead_of_inc=set_instead_of_inc)
    else:
        return GpuAdvancedIncSubtensor1_dev20(
            set_instead_of_inc=set_instead_of_inc)
Developer: GeorgyKonoplich, Project: vehicle_detection, Lines of code: 26, Source file: opt.py
Example 7: test_prod_custom_dtype
def test_prod_custom_dtype(self):
    """
    Test the ability to provide your own output dtype for a prod.
    """
    # We try multiple axis combinations even though axis should not matter.
    axes = [None, 0, 1, [0], [1], [0, 1]]
    idx = 0
    for input_dtype in imap(str, theano.scalar.all_types):
        x = tensor.matrix(dtype=input_dtype)
        for output_dtype in imap(str, theano.scalar.all_types):
            axis = axes[idx % len(axes)]
            # If output_dtype would force a downcast, we expect a TypeError.
            # We always allow int/uint inputs with float/complex outputs.
            upcasted_dtype = scalar.upcast(input_dtype, output_dtype)
            if (output_dtype == upcasted_dtype or
                    (input_dtype in tensor.discrete_dtypes and
                     output_dtype in tensor.continuous_dtypes)):
                prod_var = x.prod(dtype=output_dtype, axis=axis)
                assert prod_var.dtype == output_dtype
                if "complex" in output_dtype:
                    continue
                # Check that we can take the gradient
                grad_var = tensor.grad(prod_var.sum(), x,
                                       disconnected_inputs='ignore')
            else:
                self.assertRaises(TypeError,
                                  x.prod, dtype=output_dtype, axis=axis)
            idx += 1
Developer: jaberg, Project: Theano, Lines of code: 31, Source file: test_elemwise.py
Example 8: filter
def filter(self, data, strict=False, allow_downcast=None):
    if strict:
        if not isinstance(data, gpuarray.GpuArray):
            raise TypeError("%s expected a GpuArray object." % self,
                            data, type(data))
        if self.typecode != data.typecode:
            raise TypeError("%s expected typecode %d (dtype %s), "
                            "got %d (dtype %s)." %
                            (self, self.typecode, self.dtype,
                             data.typecode, str(data.dtype)))
        # fall through to ndim check
    elif allow_downcast:
        data = gpuarray.array(data, dtype=self.typecode, copy=False,
                              ndmin=len(self.broadcastable))
    else:
        up_dtype = scalar.upcast(self.dtype, data.dtype)
        if up_dtype == self.dtype:
            data = gpuarray.array(data, dtype=self.dtype, copy=False)
        else:
            raise TypeError("%s cannot store a value of dtype %s "
                            "without risking loss of precision." %
                            (self, data.dtype))
    if self.ndim != data.ndim:
        raise TypeError("Wrong number of dimensions: expected %s, "
                        "got %s with shape %s." % (self.ndim, data.ndim,
                                                   data.shape), data)
    shp = data.shape
    for i, b in enumerate(self.broadcastable):
        if b and shp[i] != 1:
            raise TypeError("Non-unit value on shape on a broadcastable"
                            " dimension.", shp, self.broadcastable)
    return data
Developer: Jackwangyang, Project: Theano, Lines of code: 33, Source file: type.py
Example 9: test_sum_custom_dtype
def test_sum_custom_dtype(self):
    """
    Test the ability to provide your own output dtype for a sum.
    """
    # We try multiple axis combinations even though axis should not matter.
    axes = [None, 0, 1, [0], [1], [0, 1]]
    idx = 0
    for input_dtype in imap(str, theano.scalar.all_types):
        x = tensor.matrix(dtype=input_dtype)
        for output_dtype in imap(str, theano.scalar.all_types):
            # If the output is complex, the gradient of the sum will
            # cast the complex to the input dtype. We can't call the normal
            # cast on a complex to a non-complex dtype as this is ambiguous.
            if not input_dtype.startswith("complex") and output_dtype.startswith("complex"):
                continue
            axis = axes[idx % len(axes)]
            # If output_dtype would force a downcast, we expect a TypeError.
            # We always allow int/uint inputs with float/complex outputs.
            upcasted_dtype = scalar.upcast(input_dtype, output_dtype)
            if output_dtype == upcasted_dtype or (
                    input_dtype in tensor.discrete_dtypes and
                    output_dtype in tensor.continuous_dtypes):
                sum_var = x.sum(dtype=output_dtype, axis=axis)
                assert sum_var.dtype == output_dtype
                if "complex" in input_dtype:
                    continue
                # Check that we can take the gradient
                grad_var = tensor.grad(sum_var.sum(), x, disconnected_inputs="ignore")
            else:
                self.assertRaises(TypeError, x.sum, dtype=output_dtype, axis=axis)
            idx += 1
Developer: repos-python, Project: Theano, Lines of code: 34, Source file: test_elemwise.py
Example 10: local_gpua_advanced_incsubtensor
def local_gpua_advanced_incsubtensor(node):
    # This optimization is disabled if cuda is not active
    if pygpu.get_default_context().kind != "cuda":
        return None
    x, y, ilist = node.inputs
    # GPU ops need both inputs to have the same dtype
    if x.type.dtype != y.type.dtype:
        dtype = scalar.upcast(x.type.dtype, y.type.dtype)
        if x.type.dtype != dtype:
            x = tensor.cast(x, dtype)
        if y.type.dtype != dtype:
            y = tensor.cast(y, dtype)
    set_instead_of_inc = node.op.set_instead_of_inc
    active_device_no = theano.sandbox.cuda.active_device_number()
    device_properties = theano.sandbox.cuda.device_properties
    compute_capability = device_properties(active_device_no)['major']
    if compute_capability < 2 or x.ndim != 2 or y.ndim != 2:
        return [GpuAdvancedIncSubtensor1(
            set_instead_of_inc=set_instead_of_inc)(x, y, ilist)]
    else:
        return [GpuAdvancedIncSubtensor1_dev20(
            set_instead_of_inc=set_instead_of_inc)(x, y, ilist)]
Developer: ballasn, Project: Theano, Lines of code: 28, Source file: opt.py
Example 11: test_prod_without_zeros_custom_dtype
def test_prod_without_zeros_custom_dtype(self):
    """
    Test the ability to provide your own output dtype for a ProdWithoutZeros().
    """
    # We try multiple axis combinations even though axis should not matter.
    axes = [None, 0, 1, [0], [1], [0, 1]]
    idx = 0
    for input_dtype in imap(str, theano.scalar.all_types):
        x = tensor.matrix(dtype=input_dtype)
        for output_dtype in imap(str, theano.scalar.all_types):
            axis = axes[idx % len(axes)]
            # If output_dtype would force a downcast, we expect a TypeError.
            # We always allow int/uint inputs with float/complex outputs.
            upcasted_dtype = scalar.upcast(input_dtype, output_dtype)
            if (output_dtype == upcasted_dtype or
                    (input_dtype in tensor.discrete_dtypes and
                     output_dtype in tensor.continuous_dtypes)):
                prod_woz_var = ProdWithoutZeros(
                    axis=axis, dtype=output_dtype)(x)
                assert prod_woz_var.dtype == output_dtype
            else:
                self.assertRaises(TypeError,
                                  ProdWithoutZeros(axis=axis, dtype=output_dtype),
                                  x)
            idx += 1
Developer: jaberg, Project: Theano, Lines of code: 27, Source file: test_elemwise.py
Example 12: make_node
def make_node(self, A, b):
    A_ = tensor.as_tensor_variable(A)
    b_ = tensor.as_tensor_variable(b)
    if A_.broadcastable != (False, False):
        raise TypeError("A must be a matrix", A_.type)
    if b_.broadcastable not in ((False,), (True, False), (False, False)):
        raise TypeError("b must be a matrix or vector", b_.type)
    odtype = scalar.upcast(A_.dtype, b_.dtype)
    otype = tensor.TensorType(broadcastable=b_.broadcastable, dtype=odtype)
    return gof.Apply(op=self, inputs=[A_, b_], outputs=[otype()])
Developer: harlouci, Project: Theano, Lines of code: 10, Source file: solve.py
Example 13: filter
def filter(self, data, strict=False, allow_downcast=None):
    if (isinstance(data, gpuarray.GpuArray) and
            data.typecode == self.typecode):
        # This is just to make this condition not enter the
        # following branches
        pass
    elif strict:
        if not isinstance(data, gpuarray.GpuArray):
            raise TypeError("%s expected a GpuArray object." % self,
                            data, type(data))
        if self.typecode != data.typecode:
            raise TypeError("%s expected typecode %d (dtype %s), "
                            "got %d (dtype %s)." %
                            (self, self.typecode, self.dtype,
                             data.typecode, str(data.dtype)))
        if self.context != data.context:
            raise TypeError("data context does not match type context")
        # fall through to ndim check
    elif (allow_downcast or
          (allow_downcast is None and
           type(data) == float and
           self.dtype == config.floatX)):
        data = gpuarray.array(data, dtype=self.typecode, copy=False,
                              ndmin=len(self.broadcastable),
                              context=self.context)
    else:
        if not hasattr(data, 'dtype'):
            converted_data = theano._asarray(data, self.dtype)
            # We use the `values_eq` static function from TensorType
            # to handle NaN values.
            if TensorType.values_eq(numpy.asarray(data),
                                    converted_data,
                                    force_same_dtype=False):
                data = converted_data
            data = gpuarray.array(data, context=self.context)
        up_dtype = scalar.upcast(self.dtype, data.dtype)
        if up_dtype == self.dtype:
            data = gpuarray.array(data, dtype=self.dtype, copy=False,
                                  context=self.context)
        else:
            raise TypeError("%s cannot store a value of dtype %s "
                            "without risking loss of precision." %
                            (self, data.dtype))
    if self.ndim != data.ndim:
        raise TypeError("Wrong number of dimensions: expected %s, "
                        "got %s with shape %s." % (self.ndim, data.ndim,
                                                   data.shape), data)
    shp = data.shape
    for i, b in enumerate(self.broadcastable):
        if b and shp[i] != 1:
            raise TypeError("Non-unit value on shape on a broadcastable"
                            " dimension.", shp, self.broadcastable)
    return data
Developer: wgapl, Project: Theano, Lines of code: 55, Source file: type.py
Example 14: make_node
def make_node(self, x, y, p):
    x = tensor.as_tensor_variable(x)
    y = tensor.as_tensor_variable(y)
    if not _is_sparse_variable(p):
        raise TypeError(p)
    # TODO: use it.
    dtype_out = scalar.upcast(x.type.dtype, y.type.dtype, p.type.dtype)
    return gof.Apply(self, [x, y, p], [p.type()])
Developer: lberrada, Project: Theano, Lines of code: 11, Source file: sp2.py
Example 15: make_node
def make_node(self, a, val):
    a = tensor.as_tensor_variable(a)
    val = tensor.as_tensor_variable(val)
    if a.ndim < 2:
        raise TypeError("%s: first parameter must have at least"
                        " two dimensions" % self.__class__.__name__)
    elif val.ndim != 0:
        raise TypeError("%s: second parameter must be a scalar"
                        % self.__class__.__name__)
    val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
    if val.dtype != a.dtype:
        raise TypeError("%s: type of second parameter must be the same as"
                        " the first's" % self.__class__.__name__)
    return gof.Apply(self, [a, val], [a.type()])
Developer: Theano, Project: Theano, Lines of code: 11, Source file: extra_ops.py
Example 16: filter
def filter(self, data, strict=False, allow_downcast=None):
    if (isinstance(data, gpuarray.GpuArray) and
            data.typecode == self.typecode):
        # This is just to make this condition not enter the
        # following branches
        pass
    elif strict:
        if not isinstance(data, gpuarray.GpuArray):
            raise TypeError("%s expected a GpuArray object." % self,
                            data, type(data))
        if self.typecode != data.typecode:
            raise TypeError("%s expected typecode %d (dtype %s), "
                            "got %d (dtype %s)." %
                            (self, self.typecode, self.dtype,
                             data.typecode, str(data.dtype)))
        if self.context != data.context:
            raise TypeError("data context does not match type context")
        # fall through to ndim check
    elif (allow_downcast or
          (allow_downcast is None and
           type(data) == float and
           self.dtype == config.floatX)):
        data = gpuarray.array(data, dtype=self.typecode, copy=False,
                              ndmin=len(self.broadcastable),
                              context=self.context)
    else:
        if not hasattr(data, 'dtype'):
            # This is to convert objects that don't have a dtype
            # (like lists). We anticipate that the type below
            # will match and we pass copy=False so it won't make a
            # second object on the GPU.
            data = gpuarray.array(data, copy=False, context=self.context)
        up_dtype = scalar.upcast(self.dtype, data.dtype)
        if up_dtype == self.dtype:
            data = gpuarray.array(data, dtype=self.dtype, copy=False,
                                  context=self.context)
        else:
            raise TypeError("%s cannot store a value of dtype %s "
                            "without risking loss of precision." %
                            (self, data.dtype))
    if self.ndim != data.ndim:
        raise TypeError("Wrong number of dimensions: expected %s, "
                        "got %s with shape %s." % (self.ndim, data.ndim,
                                                   data.shape), data)
    shp = data.shape
    for i, b in enumerate(self.broadcastable):
        if b and shp[i] != 1:
            raise TypeError("Non-unit value on shape on a broadcastable"
                            " dimension.", shp, self.broadcastable)
    return data
Developer: aalmah, Project: Theano, Lines of code: 52, Source file: type.py
Example 17: test_prod_custom_acc_dtype
def test_prod_custom_acc_dtype(self):
    """
    Test the ability to provide your own acc_dtype for a prod.
    """
    # We try multiple axis combinations even though axis should not matter.
    axes = [None, 0, 1, [], [0], [1], [0, 1]]
    idx = 0
    for input_dtype in imap(str, theano.scalar.all_types):
        x = tensor.matrix(dtype=input_dtype)
        for acc_dtype in imap(str, theano.scalar.all_types):
            axis = axes[idx % len(axes)]
            # If acc_dtype would force a downcast, we expect a TypeError.
            # We always allow int/uint inputs with float/complex outputs.
            upcasted_dtype = scalar.upcast(input_dtype, acc_dtype)
            if (acc_dtype == upcasted_dtype or
                    (input_dtype in tensor.discrete_dtypes and
                     acc_dtype in tensor.continuous_dtypes)):
                prod_var = x.prod(acc_dtype=acc_dtype, axis=axis)
                assert prod_var.owner.op.acc_dtype == acc_dtype
                if (acc_dtype.startswith('complex') and
                        input_dtype != acc_dtype):
                    continue
                f = theano.function([x], prod_var)
                data = numpy.random.rand(3, 4) * 10
                data = data.astype(input_dtype)
                f(data)
                if "complex" in acc_dtype:
                    continue
                # Check that we can take the gradient
                tensor.grad(prod_var.sum(), x,
                            disconnected_inputs='ignore')
            else:
                self.assertRaises(TypeError,
                                  x.prod, acc_dtype=acc_dtype, axis=axis)
            idx += 1
Developer: SamuelZeng, Project: Theano, Lines of code: 39, Source file: test_elemwise.py
Example 18: local_usmm_csx
def local_usmm_csx(node):
    """ usmm -> usmm_csc_dense """
    if node.op == usmm:
        alpha, x, y, z = node.inputs
        x_is_sparse_variable = _is_sparse_variable(x)
        y_is_sparse_variable = _is_sparse_variable(y)
        if x_is_sparse_variable and not y_is_sparse_variable:
            if x.type.format == 'csc':
                x_val, x_ind, x_ptr, x_shape = csm_properties(x)
                x_nsparse = x_shape[0]
                dtype_out = scalar.upcast(alpha.type.dtype, x.type.dtype,
                                          y.type.dtype, z.type.dtype)
                if dtype_out not in ('float32', 'float64'):
                    return False
                # Sparse cast is not implemented.
                if y.type.dtype != dtype_out:
                    return False
                return [usmm_csc_dense(alpha, x_val, x_ind, x_ptr,
                                       x_nsparse, y, z)]
    return False
Developer: npinto, Project: Theano, Lines of code: 23, Source file: opt.py
Example 19: filter
def filter(self, data, strict=False, allow_downcast=None):
    """Convert `data` to something which can be associated to a
    `TensorVariable`.

    This function is not meant to be called in user code. It is for
    `Linker` instances to use when running a compiled graph.
    """
    # Explicit error message when one accidentally uses a Variable as
    # input (typical mistake, especially with shared variables).
    if isinstance(data, Variable):
        raise TypeError(
            'Expected an array-like object, but found a Variable: '
            'maybe you are trying to call a function on a (possibly '
            'shared) variable instead of a numeric array?')
    if ((type(data) is numpy.ndarray) and
            (data.dtype == self.numpy_dtype)):
        if data.dtype.num != self.numpy_dtype.num:
            data = theano._asarray(data, dtype=self.dtype)
        # -- now fall through to ndim check
    elif ((type(data) is numpy.memmap) and
            (data.dtype == self.numpy_dtype)):
        # numpy.memmap is a "safe" subclass of ndarray,
        # so we can use it wherever we expect a base ndarray.
        # However, casting it would defeat the purpose of not
        # loading the whole data into memory.
        pass
    elif strict:
        # If any of the two conditions above was not met,
        # we raise a meaningful TypeError.
        if not (type(data) is numpy.ndarray):
            raise TypeError("%s expected a ndarray object." % self,
                            data, type(data))
        if data.dtype != self.numpy_dtype:
            raise TypeError(("%s expected a ndarray object with "
                             "dtype = %s (got %s).") % (
                                 self, self.numpy_dtype, data.dtype))
        assert False, "This point should never be reached."
    else:
        if allow_downcast:
            # Convert to self.dtype, regardless of the type of data
            data = theano._asarray(data, dtype=self.dtype)
            # TODO: consider padding shape with ones to make it consistent
            # with self.broadcastable... like vector->row type thing
        else:
            if isinstance(data, numpy.ndarray):
                # Check if self.dtype can accurately represent data
                # (do not try to convert the data)
                up_dtype = scal.upcast(self.dtype, data.dtype)
                if up_dtype == self.dtype:
                    # Bug in the following line when data is a
                    # scalar array, see
                    # http://projects.scipy.org/numpy/ticket/1611
                    # data = data.astype(self.dtype)
                    data = theano._asarray(data, dtype=self.dtype)
                if up_dtype != self.dtype:
                    err_msg = (
                        '%s cannot store a value of dtype %s without '
                        'risking loss of precision. If you do not mind '
                        'this loss, you can: '
                        '1) explicitly cast your data to %s, or '
                        '2) set "allow_input_downcast=True" when calling '
                        '"function".'
                        % (self, data.dtype, self.dtype))
                    raise TypeError(err_msg, data)
            elif (allow_downcast is None and
                    type(data) is float and
                    self.dtype == theano.config.floatX):
                # Special case where we allow downcasting of Python float
                # literals to floatX, even when floatX == 'float32'
                data = theano._asarray(data, self.dtype)
            else:
                # data has to be converted.
                # Check that this conversion is lossless.
                converted_data = theano._asarray(data, self.dtype)
                # We use the `values_eq` static function from TensorType
                # to handle NaN values.
                if TensorType.values_eq(numpy.asarray(data),
                                        converted_data,
                                        force_same_dtype=False):
                    data = converted_data
                else:
                    # Do not print a too long description of data
                    # (ndarray truncates it, but it's not sure for data)
                    str_data = str(data)
                    if len(str_data) > 80:
                        str_data = str_data[:75] + '(...)'
                    err_msg = (
                        '%s cannot store accurately value %s, '
                        'it would be represented as %s. '
                        'If you do not mind this precision loss, you can: '
                        '1) explicitly convert your data to a numpy array '
                        'of dtype %s, or '
                        '2) set "allow_input_downcast=True" when calling '
                        '"function".'
                        % (self, data, converted_data, self.dtype))
                    raise TypeError(err_msg, data)
    if self.ndim != data.ndim:
        # ... (the rest of the code is omitted here) ...
Developer: alimuldal, Project: Theano, Lines of code: 101, Source file: type.py
Example 20: c_code
def c_code(self, node, name, inputs, outputs, sub):
    x, y, p_data, p_ind, p_ptr, p_ncols = inputs
    z_data, z_ind, z_ptr = outputs
    if node.inputs[0].type.dtype in ('complex64', 'complex128'):
        raise NotImplementedError('Complex types are not supported for x')
    if node.inputs[1].type.dtype in ('complex64', 'complex128'):
        raise NotImplementedError('Complex types are not supported for y')
    if node.inputs[2].type.dtype in ('complex64', 'complex128'):
        raise NotImplementedError(
            'Complex types are not supported for pattern')
    dot_out = scalar.upcast(node.inputs[0].type.dtype,
                            node.inputs[1].type.dtype)
    if dot_out == "float32":
        conv_type = "float"
        cdot = "sdot_"
    else:
        conv_type = "double"
        cdot = "ddot_"
    # retrieve dtype number
    typenum_x = node.inputs[0].type.dtype_specs()[-1]
    typenum_y = node.inputs[1].type.dtype_specs()[-1]
    typenum_p = node.inputs[2].type.dtype_specs()[-1]
    typenum_zd = tensor.TensorType(node.outputs[0].dtype,
                                   []).dtype_specs()[-1]
    typenum_zi = tensor.TensorType(node.outputs[1].dtype,
                                   []).dtype_specs()[-1]
    typenum_zp = tensor.TensorType(node.outputs[2].dtype,
                                   []).dtype_specs()[-1]
    rval = """
    if (%(x)s->nd != 2) {
        PyErr_SetString(PyExc_NotImplementedError, "rank(x) != 2"); %(fail)s;}
    if (%(y)s->nd != 2) {
        PyErr_SetString(PyExc_NotImplementedError, "rank(y) != 2"); %(fail)s;}
    if (%(x)s->descr->type_num != %(typenum_x)s) {
        PyErr_SetString(PyExc_NotImplementedError,
                        "Invalid type for x");
        %(fail)s;}
    if (%(y)s->descr->type_num != %(typenum_y)s) {
        PyErr_SetString(PyExc_NotImplementedError,
                        "Invalid type for y");
        %(fail)s;}
    if (%(p_data)s->descr->type_num != %(typenum_p)s) {
        PyErr_SetString(PyExc_NotImplementedError,
                        "Invalid type for pattern");
        %(fail)s;}
    if (%(x)s->dimensions[1] != %(y)s->dimensions[1]) {
        PyErr_SetString(PyExc_NotImplementedError,
                        "x's number of columns doesn't match y's rows! Note: sampling_dot is different from dot because y is assumed to be transposed.");
        %(fail)s;}
    if (%(y)s->dimensions[0] != ((npy_int32 *)%(p_ncols)s->data)[0] ||
        %(x)s->dimensions[0] != (%(p_ptr)s->dimensions[0] - 1))
    {PyErr_SetString(PyExc_NotImplementedError,
                     "The dimension of the pattern and the output must match");
     %(fail)s;}
    // Allocate output
    if (!%(z_data)s
        || (%(z_data)s->dimensions[0] != %(p_data)s->dimensions[0])
        || (%(z_data)s->descr->type_num != %(typenum_zd)s)) {
        {Py_XDECREF(%(z_data)s);}
        npy_intp dims[] = {0};
        dims[0] = %(p_data)s->dimensions[0];
        %(z_data)s = (PyArrayObject*) PyArray_SimpleNew(1, dims,
                                                        %(typenum_zd)s);
    }
    if (!%(z_ind)s
        || (%(z_ind)s->dimensions[0] != %(p_ind)s->dimensions[0])
        || (%(z_ind)s->descr->type_num != %(typenum_zi)s)) {
        {Py_XDECREF(%(z_ind)s);}
        npy_intp dims[] = {0};
        dims[0] = %(p_ind)s->dimensions[0];
        %(z_ind)s = (PyArrayObject*) PyArray_SimpleNew(1, dims,
                                                       %(typenum_zi)s);
    }
    if (!%(z_ptr)s
        || (%(z_ptr)s->dimensions[0] != %(p_ptr)s->dimensions[0])
        || (%(z_ptr)s->descr->type_num != %(typenum_zp)s)) {
        {Py_XDECREF(%(z_ptr)s);}
        npy_intp dims[] = {0};
        dims[0] = %(p_ptr)s->dimensions[0];
        %(z_ptr)s = (PyArrayObject*) PyArray_SimpleNew(1, dims,
                                                       %(typenum_zp)s);
    }
    {
        // Product of MxK and NxK, output MxN
        npy_intp M = %(x)s->dimensions[0];
        npy_intp N = %(y)s->dimensions[0];
        npy_intp K = %(y)s->dimensions[1];
        // pointers to access actual data in the arrays passed as params.
        const dtype_%(x)s* __restrict__ Dx = (dtype_%(x)s*)%(x)s->data;
    # ... (the rest of the code is omitted here) ...
Developer: lberrada, Project: Theano, Lines of code: 101, Source file: sp2.py