This page collects typical usage examples of the Python function pygpu.gpuarray.array. If you have been wondering how the array function is used in practice, what it does, or what real calls to it look like, the hand-picked code examples below should help.
A total of 15 code examples of the array function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
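Before the examples, here is a minimal, hedged sketch of the function's basic use: it transfers a NumPy array into GPU memory inside a libgpuarray context and lets you copy the data back with numpy.asarray. This is a sketch only; it assumes a working GPU device and that pygpu.init accepts a device string such as 'cuda0' (the device name is an assumption, not something taken from the examples below).

import numpy
import pygpu
from pygpu import gpuarray

# Assumption: a usable GPU device; 'cuda0' is only an example name.
ctx = pygpu.init('cuda0')

a = numpy.random.rand(5, 4).astype('float32')
# Transfer the host array into GPU memory within the given context.
b = gpuarray.array(a, context=ctx)

assert b.shape == a.shape
# numpy.asarray copies the data back to the host for comparison.
assert numpy.allclose(numpy.asarray(b), a)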
Example 1: filter
def filter(self, data, strict=False, allow_downcast=None):
    if strict:
        if not isinstance(data, gpuarray.GpuArray):
            raise TypeError("%s expected a GpuArray object." % self,
                            data, type(data))
        if self.typecode != data.typecode:
            raise TypeError("%s expected typecode %d (dtype %s), "
                            "got %d (dtype %s)." %
                            (self, self.typecode, self.dtype,
                             data.typecode, str(data.dtype)))
        # fallthrough to ndim check
    elif allow_downcast:
        data = gpuarray.array(data, dtype=self.typecode, copy=False,
                              ndmin=len(self.broadcastable))
    else:
        up_dtype = scalar.upcast(self.dtype, data.dtype)
        if up_dtype == self.dtype:
            data = gpuarray.array(data, dtype=self.dtype, copy=False)
        else:
            raise TypeError("%s cannot store a value of dtype %s "
                            "without risking loss of precision." %
                            (self, data.dtype))

    if self.ndim != data.ndim:
        raise TypeError("Wrong number of dimensions: expected %s, "
                        "got %s with shape %s." % (self.ndim, data.ndim,
                                                   data.shape), data)

    shp = data.shape
    for i, b in enumerate(self.broadcastable):
        if b and shp[i] != 1:
            raise TypeError("Non-unit value on shape on a broadcastable"
                            " dimension.", shp, self.broadcastable)
    return data
Author: Jackwangyang, Project: Theano, Lines of code: 33, Source file: type.py
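To make the control flow above concrete, here is a small, hedged sketch of how such a filter call behaves for a float32 type. The GpuArrayType construction, the import path, and a configured GPU backend are assumptions for illustration and may differ between Theano versions.

import numpy
# Assumption: the import path varies across Theano versions
# (older releases expose this type under theano.sandbox.gpuarray.type).
from theano.gpuarray.type import GpuArrayType

t = GpuArrayType(dtype='float32', broadcastable=(False, False))
x64 = numpy.random.rand(3, 4)              # float64 data on the host

g = t.filter(x64, allow_downcast=True)     # downcast to float32 and moved to the GPU
g = t.filter(x64.astype('float32'))        # exact dtype: accepted, no precision risk
# t.filter(x64)               # would raise TypeError: risk of precision loss
# t.filter(x64, strict=True)  # would raise TypeError: not a GpuArray object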
Example 2: filter
def filter(self, data, strict=False, allow_downcast=None):
    if (isinstance(data, gpuarray.GpuArray) and
            data.typecode == self.typecode):
        # This is just to make this condition not enter the
        # following branches
        pass
    elif strict:
        if not isinstance(data, gpuarray.GpuArray):
            raise TypeError("%s expected a GpuArray object." % self,
                            data, type(data))
        if self.typecode != data.typecode:
            raise TypeError("%s expected typecode %d (dtype %s), "
                            "got %d (dtype %s)." %
                            (self, self.typecode, self.dtype,
                             data.typecode, str(data.dtype)))
        if self.context != data.context:
            raise TypeError("data context does not match type context")
        # fallthrough to ndim check
    elif (allow_downcast or
          (allow_downcast is None and
           type(data) == float and
           self.dtype == config.floatX)):
        data = gpuarray.array(data, dtype=self.typecode, copy=False,
                              ndmin=len(self.broadcastable),
                              context=self.context)
    else:
        if not hasattr(data, 'dtype'):
            converted_data = theano._asarray(data, self.dtype)
            # We use the `values_eq` static function from TensorType
            # to handle NaN values.
            if TensorType.values_eq(numpy.asarray(data),
                                    converted_data,
                                    force_same_dtype=False):
                data = converted_data
            data = gpuarray.array(data, context=self.context)

        up_dtype = scalar.upcast(self.dtype, data.dtype)
        if up_dtype == self.dtype:
            data = gpuarray.array(data, dtype=self.dtype, copy=False,
                                  context=self.context)
        else:
            raise TypeError("%s cannot store a value of dtype %s "
                            "without risking loss of precision." %
                            (self, data.dtype))

    if self.ndim != data.ndim:
        raise TypeError("Wrong number of dimensions: expected %s, "
                        "got %s with shape %s." % (self.ndim, data.ndim,
                                                   data.shape), data)

    shp = data.shape
    for i, b in enumerate(self.broadcastable):
        if b and shp[i] != 1:
            raise TypeError("Non-unit value on shape on a broadcastable"
                            " dimension.", shp, self.broadcastable)
    return data
Author: wgapl, Project: Theano, Lines of code: 55, Source file: type.py
Example 3: filter
def filter(self, data, strict=False, allow_downcast=None):
    if (isinstance(data, gpuarray.GpuArray) and
            data.typecode == self.typecode):
        # This is just to make this condition not enter the
        # following branches
        pass
    elif strict:
        if not isinstance(data, gpuarray.GpuArray):
            raise TypeError("%s expected a GpuArray object." % self,
                            data, type(data))
        if self.typecode != data.typecode:
            raise TypeError("%s expected typecode %d (dtype %s), "
                            "got %d (dtype %s)." %
                            (self, self.typecode, self.dtype,
                             data.typecode, str(data.dtype)))
        if self.context != data.context:
            raise TypeError("data context does not match type context")
        # fallthrough to ndim check
    elif (allow_downcast or
          (allow_downcast is None and
           type(data) == float and
           self.dtype == config.floatX)):
        data = gpuarray.array(data, dtype=self.typecode, copy=False,
                              ndmin=len(self.broadcastable),
                              context=self.context)
    else:
        if not hasattr(data, 'dtype'):
            # This is to convert objects that don't have a dtype
            # (like lists). We anticipate that the type below
            # will match and we pass copy=False so it won't make a
            # second object on the GPU.
            data = gpuarray.array(data, copy=False, context=self.context)

        up_dtype = scalar.upcast(self.dtype, data.dtype)
        if up_dtype == self.dtype:
            data = gpuarray.array(data, dtype=self.dtype, copy=False,
                                  context=self.context)
        else:
            raise TypeError("%s cannot store a value of dtype %s "
                            "without risking loss of precision." %
                            (self, data.dtype))

    if self.ndim != data.ndim:
        raise TypeError("Wrong number of dimensions: expected %s, "
                        "got %s with shape %s." % (self.ndim, data.ndim,
                                                   data.shape), data)

    shp = data.shape
    for i, b in enumerate(self.broadcastable):
        if b and shp[i] != 1:
            raise TypeError("Non-unit value on shape on a broadcastable"
                            " dimension.", shp, self.broadcastable)
    return data
Author: aalmah, Project: Theano, Lines of code: 52, Source file: type.py
Example 4: setUp
def setUp(self):
    self.input = gpu_ftensor4()
    self.filters = gpu_ftensor4()
    self.topgrad = gpu_ftensor4()
    self.constant_tensor = gpuarray.array(
        numpy.zeros((3, 5, 7, 11), dtype='float32'),
        context=get_context(test_ctx_name))
Author: MikeAmy, Project: Theano, Lines of code: 7, Source file: test_abstractconv.py
Example 5: rand_gpuarray
def rand_gpuarray(*shape, **kwargs):
    r = rng.rand(*shape) * 2 - 1
    dtype = kwargs.pop("dtype", theano.config.floatX)
    cls = kwargs.pop("cls", None)
    if len(kwargs) != 0:
        raise TypeError("Unexpected argument %s", list(kwargs.keys())[0])
    return gpuarray.array(r, dtype=dtype, cls=cls,
                          context=get_context(test_ctx_name))
Author: kelvinxu, Project: Theano, Lines of code: 7, Source file: test_basic_ops.py
Example 6: gen_gpuarray
def gen_gpuarray(
    shape_orig,
    dtype="float32",
    offseted_outer=False,
    offseted_inner=False,
    sliced=1,
    order="c",
    nozeros=False,
    incr=0,
    ctx=None,
    cls=None,
):
    if sliced is True:
        sliced = 2
    elif sliced is False:
        sliced = 1

    shape = numpy.asarray(shape_orig).copy()
    if sliced != 1 and len(shape) > 0:
        shape[0] *= numpy.absolute(sliced)
    if offseted_outer and len(shape) > 0:
        shape[0] += 1
    if offseted_inner and len(shape) > 0:
        shape[-1] += 1

    low = 0.0
    if nozeros:
        low = 1.0
    a = numpy.random.uniform(low, 10.0, shape)
    a += incr
    a = numpy.asarray(a, dtype=dtype)
    assert order in ["c", "f"]
    if order == "f" and len(shape) > 0:
        a = numpy.asfortranarray(a)
    b = gpuarray.array(a, context=ctx, cls=cls)
    if order == "f" and len(shape) > 0 and b.size > 1:
        assert b.flags["F_CONTIGUOUS"]

    if offseted_outer and len(shape) > 0:
        b = b[1:]
        a = a[1:]
    if offseted_inner and len(shape) > 0:
        # The b[..., 1:] act as the test for this subtensor case.
        b = b[..., 1:]
        a = a[..., 1:]
    if sliced != 1 and len(shape) > 0:
        a = a[::sliced]
        b = b[::sliced]

    if False and shape_orig == ():
        assert a.shape == (1,)
        assert b.shape == (1,)
    else:
        assert a.shape == shape_orig, (a.shape, shape_orig)
        assert b.shape == shape_orig, (b.shape, shape_orig)
    assert numpy.allclose(a, numpy.asarray(b)), (a, numpy.asarray(b))
    return a, b
Author: xindongzhang, Project: libgpuarray, Lines of code: 60, Source file: support.py
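A short usage sketch of this helper follows (hedged): ctx is assumed to be a pygpu context, e.g. one obtained from pygpu.init, and is not created in the snippet above.

import numpy
# Request a 4x5 float32 pair whose GPU copy is carved out of a larger,
# offset, strided, Fortran-ordered parent; the helper itself asserts
# that the NumPy and GpuArray views still match element-wise.
a, b = gen_gpuarray((4, 5), dtype='float32', offseted_outer=True,
                    sliced=2, order='f', ctx=ctx)
assert a.shape == b.shape == (4, 5)
assert numpy.allclose(a, numpy.asarray(b))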
Example 7: transfer_not_contiguous
def transfer_not_contiguous(shp, dtype):
    a = numpy.random.rand(*shp) * 10
    a = a[::-1]
    b = gpu_ndarray.array(a, context=ctx)
    c = numpy.asarray(b)

    assert numpy.allclose(c, a)
    assert a.shape == b.shape == c.shape
    # the result array (c) is C contiguous
    assert a.strides == b.strides == (-c.strides[0],) + c.strides[1:]
    assert a.dtype == b.dtype == c.dtype
    assert c.flags.c_contiguous
Author: MaxBareiss, Project: libgpuarray, Lines of code: 12, Source file: test_gpu_ndarray.py
Example 8: test_transfer_gpu_gpu
def test_transfer_gpu_gpu():
    g = GpuArrayType(dtype='float32', broadcastable=(False, False),
                     context_name=test_ctx_name)()
    av = np.asarray(rng.rand(5, 4), dtype='float32')
    gv = gpuarray.array(av, context=get_context(test_ctx_name))
    mode = mode_with_gpu.excluding('cut_gpua_host_transfers',
                                   'local_cut_gpua_host_gpua')
    f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, GpuToGpu)
    fv = f(gv)
    assert GpuArrayType.values_eq(fv, gv)
Author: DEVESHTARASIA, Project: Theano, Lines of code: 13, Source file: test_basic_ops.py
Example 9: test_transfer_cpu_gpu
def test_transfer_cpu_gpu():
    a = T.fmatrix('a')
    g = GpuArrayType(dtype='float32', broadcastable=(False, False))('g')

    av = np.asarray(rng.rand(5, 4), dtype='float32')
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)

    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert np.all(fv == av)
Author: DEVESHTARASIA, Project: Theano, Lines of code: 14, Source file: test_basic_ops.py
Example 10: transfer_fortran
def transfer_fortran(shp, dtype):
    a = numpy.random.rand(*shp) * 10
    a_ = numpy.asfortranarray(a)
    if len(shp) > 1:
        assert a_.strides != a.strides
    a = a_
    b = gpu_ndarray.array(a, context=ctx)
    c = numpy.asarray(b)

    assert a.shape == b.shape == c.shape
    assert a.dtype == b.dtype == c.dtype
    assert a.flags.f_contiguous
    assert c.flags.f_contiguous
    assert a.strides == b.strides == c.strides
    assert numpy.allclose(c, a)
Author: MaxBareiss, Project: libgpuarray, Lines of code: 15, Source file: test_gpu_ndarray.py
Example 11: test_transfer_strided
def test_transfer_strided():
    # This is just to ensure that it works in theano
    # libgpuarray has a much more comprehensive suit of tests to
    # ensure correctness
    a = T.fmatrix('a')
    g = GpuArrayType(dtype='float32', broadcastable=(False, False))('g')

    av = np.asarray(rng.rand(5, 8), dtype='float32')
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    av = av[:, ::2]
    gv = gv[:, ::2]

    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)

    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert np.all(fv == av)
Author: DEVESHTARASIA, Project: Theano, Lines of code: 20, Source file: test_basic_ops.py
Example 12: as_gpuarray
def as_gpuarray(x):
    return gpuarray.array(x, copy=False)
Author: naisanza, Project: Theano, Lines of code: 2, Source file: basic_ops.py
Example 13: perform
def perform(self, node, inp, out):
    x, = inp
    z, = out
    z[0] = gpuarray.array(numpy.asarray(x))
Author: naisanza, Project: Theano, Lines of code: 4, Source file: basic_ops.py
Example 14: perform
def perform(self, node, inp, out, ctx):
    x, = inp
    z, = out
    z[0] = gpuarray.array(x, context=ctx)
Author: 5730279821-TA, Project: Theano, Lines of code: 4, Source file: basic_ops.py
Example 15: thunk
def thunk():
    context = inputs[0][0].context

    # Size of the matrices to invert.
    z = outputs[0]

    # Matrix.
    A = inputs[0][0]

    # Solution vectors.
    b = inputs[1][0]

    assert(len(A.shape) == 2)
    assert(len(b.shape) == 2)

    if self.trans in ['T', 'C']:
        trans = 1
        l, n = A.shape
        k, m = b.shape
    elif self.trans == 'N':
        trans = 0
        n, l = A.shape
        k, m = b.shape
    else:
        raise ValueError('Invalid value for trans')
    if l != n:
        raise ValueError('A must be a square matrix')
    if n != k:
        raise ValueError('A and b must be aligned.')

    lda = max(1, n)
    ldb = max(1, k, m)

    # We copy A and b as cusolver operates inplace
    b = gpuarray.array(b, copy=True, order='F')
    if not self.inplace:
        A = gpuarray.array(A, copy=True)
    A_ptr = A.gpudata
    b_ptr = b.gpudata

    # cusolver expects a F ordered matrix, but A is not explicitly
    # converted between C and F order, instead we switch the
    # "transpose" flag.
    if A.flags['C_CONTIGUOUS']:
        trans = 1 - trans

    workspace_size = cusolver.cusolverDnSgetrf_bufferSize(
        cusolver_handle, n, n, A_ptr, lda)

    if (thunk.workspace is None or
            thunk.workspace.size != workspace_size):
        thunk.workspace = gpuarray.zeros((workspace_size,),
                                         dtype='float32',
                                         context=context)

    if thunk.pivots is None or thunk.pivots.size != min(n, n):
        thunk.pivots = gpuarray.zeros((min(n, n),),
                                      dtype='float32',
                                      context=context)

    if thunk.dev_info is None:
        thunk.dev_info = gpuarray.zeros((1,),
                                        dtype='float32',
                                        context=context)

    workspace_ptr = thunk.workspace.gpudata
    pivots_ptr = thunk.pivots.gpudata
    dev_info_ptr = thunk.dev_info.gpudata

    cusolver.cusolverDnSgetrf(
        cusolver_handle, n, n, A_ptr, lda, workspace_ptr,
        pivots_ptr, dev_info_ptr)

    cusolver.cusolverDnSgetrs(
        cusolver_handle, trans, n, m, A_ptr, lda,
        pivots_ptr, b_ptr, ldb, dev_info_ptr)

    z[0] = b
Author: bouthilx, Project: Theano, Lines of code: 78, Source file: linalg.py
Note: The pygpu.gpuarray.array examples on this page were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright in the code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not republish without permission.