This article collects typical usage examples of the Python function misc.select_block_grid_sizes. If you have been wondering what select_block_grid_sizes does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The sections that follow present 16 code examples of select_block_grid_sizes, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
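Before diving into the individual examples, here is a minimal sketch of the calling pattern they all share: query the current device, ask select_block_grid_sizes for block and grid dimensions that cover the data shape, and pass the returned tuples to a compiled kernel. The sketch assumes pycuda and scikits.cuda are installed and a CUDA device is available; the scale kernel and the exact import path (scikits.cuda.misc in older releases, skcuda.misc in newer scikit-cuda) are illustrative assumptions rather than part of the examples below.
# A minimal, self-contained sketch (assumptions noted above) of the pattern
# shared by the examples on this page.
import numpy as np
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from pycuda.compiler import SourceModule
import scikits.cuda.misc as misc   # or: import skcuda.misc as misc

x = np.random.rand(128, 128).astype(np.float32)
x_gpu = gpuarray.to_gpu(x)

# Ask the library for launch dimensions that cover x_gpu.shape:
dev = misc.get_current_device()
block_dim, grid_dim = misc.select_block_grid_sizes(dev, x_gpu.shape)

# Illustrative kernel (not part of the library): double every element.
mod = SourceModule("""
__global__ void scale(float *x, unsigned int N) {
    unsigned int idx = blockIdx.y*gridDim.x*blockDim.x +
                       blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < N)
        x[idx] *= 2.0f;
}
""")
scale = mod.get_function("scale")
scale(x_gpu, np.uint32(x_gpu.size), block=block_dim, grid=grid_dim)
assert np.allclose(x_gpu.get(), 2*x)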
Example 1: eye
def eye(N, dtype=np.float32):
"""
Construct a 2D matrix with ones on the diagonal and zeros elsewhere.
Constructs a matrix in device memory whose diagonal elements
are set to 1 and non-diagonal elements are set to 0.
Parameters
----------
N : int
Number of rows or columns in the output matrix.
Returns
-------
e_gpu : pycuda.gpuarray.GPUArray
Diagonal matrix of dimensions `[N, N]` with diagonal values
set to 1.
Examples
--------
>>> import pycuda.driver as drv
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import linalg
>>> linalg.init()
>>> N = 5
>>> e_gpu = linalg.eye(N)
>>> np.all(e_gpu.get() == np.eye(N))
True
>>> e_gpu = linalg.eye(N, np.complex64)
>>> np.all(e_gpu.get() == np.eye(N, dtype=np.complex64))
True
"""
if dtype not in [np.float32, np.float64, np.complex64, np.complex128]:
raise ValueError("unrecognized type")
if N <= 0:
raise ValueError("N must be greater than 0")
use_double = int(dtype in [np.float64, np.complex128])
use_complex = int(dtype in [np.complex64, np.complex128])
# Initialize output matrix:
e_gpu = misc.zeros((N, N), dtype)
# Get block/grid sizes:
dev = misc.get_current_device()
block_dim, grid_dim = misc.select_block_grid_sizes(dev, e_gpu.shape)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir = None
eye_mod = SourceModule(eye_template.substitute(use_double=use_double, use_complex=use_complex), cache_dir=cache_dir)
eye = eye_mod.get_function("eye")
eye(e_gpu, np.uint32(N), block=block_dim, grid=grid_dim)
return e_gpu
Author: jellis18 | Project: scikits.cuda | Lines: 60 | Source file: linalg.py
Example 2: e1z
def e1z(z_gpu, dev):
"""
Exponential integral with `n = 1` of complex arguments.
Parameters
----------
z_gpu : GPUArray
Input matrix of shape `(m, n)`.
dev : pycuda.driver.Device
Device object to be used.
Returns
-------
e_gpu : GPUArray
GPUArray containing the exponential integrals of
the entries of `z_gpu`.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import scipy.special
>>> import special
>>> z = np.asarray(np.random.rand(4, 4)+1j*np.random.rand(4, 4), np.complex64)
>>> z_gpu = gpuarray.to_gpu(z)
>>> e_gpu = e1z(z_gpu, pycuda.autoinit.device)
>>> e_sp = scipy.special.exp1(z)
>>> np.allclose(e_sp, e_gpu.get())
True
"""
if z_gpu.dtype == np.complex64:
use_double = 0
elif z_gpu.dtype == np.complex128:
use_double = 1
else:
raise ValueError("unsupported type")
# Get block/grid sizes:
max_threads_per_block, max_block_dim, max_grid_dim = get_dev_attrs(dev)
block_dim, grid_dim = select_block_grid_sizes(dev, z_gpu.shape)
max_blocks_per_grid = max(max_grid_dim)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir = None
e1z_mod = SourceModule(
e1z_mod_template.substitute(
use_double=use_double, max_threads_per_block=max_threads_per_block, max_blocks_per_grid=max_blocks_per_grid
),
cache_dir=cache_dir,
options=["-I", install_headers],
)
e1z_func = e1z_mod.get_function("e1z")
e_gpu = gpuarray.empty_like(z_gpu)
e1z_func(z_gpu.gpudata, e_gpu.gpudata, np.uint32(z_gpu.size), block=block_dim, grid=grid_dim)
return e_gpu
Author: stefanv | Project: scikits.cuda | Lines: 60 | Source file: special.py
Example 3: e1z
def e1z(z_gpu):
"""
Exponential integral with `n = 1` of complex arguments.
Parameters
----------
z_gpu : GPUArray
Input matrix of shape `(m, n)`.
Returns
-------
e_gpu : GPUArray
GPUArray containing the exponential integrals of
the entries of `z_gpu`.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import scipy.special
>>> import special
>>> z = np.asarray(np.random.rand(4, 4)+1j*np.random.rand(4, 4), np.complex64)
>>> z_gpu = gpuarray.to_gpu(z)
>>> e_gpu = e1z(z_gpu)
>>> e_sp = scipy.special.exp1(z)
>>> np.allclose(e_sp, e_gpu.get())
True
"""
if z_gpu.dtype == np.complex64:
use_double = 0
elif z_gpu.dtype == np.complex128:
use_double = 1
else:
raise ValueError('unsupported type')
# Get block/grid sizes; the number of threads per block is limited
# to 256 because the e1z kernel defined above uses too many
# registers to be invoked with more threads per block:
dev = get_current_device()
max_threads_per_block = 256
block_dim, grid_dim = select_block_grid_sizes(dev, z_gpu.shape, max_threads_per_block)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir=None
e1z_mod = \
SourceModule(e1z_mod_template.substitute(use_double=use_double),
cache_dir=cache_dir)
e1z_func = e1z_mod.get_function("e1z")
e_gpu = gpuarray.empty_like(z_gpu)
e1z_func(z_gpu, e_gpu,
np.uint32(z_gpu.size),
block=block_dim,
grid=grid_dim)
return e_gpu
Author: sequoiar | Project: scikits.cuda | Lines: 60 | Source file: special.py
Example 4: gen_trapz2d_mult
def gen_trapz2d_mult(mat_shape, mult_type):
"""
Generate multiplication matrix for 2D trapezoidal integration.
Generates a matrix whose dot product with some other matrix of
equal length (when flattened) is equivalent to the definite double
integral of the latter computed using trapezoidal integration.
Parameters
----------
mat_shape : tuple
Shape of matrix.
mult_type : float type
Floating point type to use when generating the array.
Returns
-------
result : pycuda.gpuarray.GPUArray
Generated matrix.
"""
if mult_type not in [np.float32, np.float64, np.complex64,
np.complex128]:
raise ValueError('unrecognized type')
use_double = int(mult_type in [np.float64, np.complex128])
use_complex = int(mult_type in [np.complex64, np.complex128])
# Allocate output matrix:
Ny, Nx = mat_shape
mult_gpu = gpuarray.empty(mat_shape, mult_type)
# Get block/grid sizes:
dev = get_current_device()
block_dim, grid_dim = select_block_grid_sizes(dev, mat_shape)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir=None
gen_trapz2d_mult_mod = \
SourceModule(gen_trapz2d_mult_template.substitute(use_double=use_double,
use_complex=use_complex),
cache_dir=cache_dir)
gen_trapz2d_mult = gen_trapz2d_mult_mod.get_function("gen_trapz2d_mult")
gen_trapz2d_mult(mult_gpu, np.uint32(Ny), np.uint32(Nx),
block=block_dim,
grid=grid_dim)
return mult_gpu
Author: sequoiar | Project: scikits.cuda | Lines: 51 | Source file: integrate.py
Example 5: gen_trapz2d_mult
def gen_trapz2d_mult(mat_shape, dtype):
"""
Generate multiplication matrix for 2D trapezoidal integration.
Generates a matrix whose dot product with some other matrix of
equal length (when flattened) is equivalent to the definite double
integral of the latter computed using trapezoidal integration.
Parameters
----------
mat_shape : tuple
Shape of matrix.
dtype : float type
Floating point type to use when generating the array.
Returns
-------
result : pycuda.gpuarray.GPUArray
Generated matrix.
"""
if dtype not in [np.float32, np.float64, np.complex64,
np.complex128]:
raise ValueError('unrecognized type')
use_double = int(dtype in [np.float64, np.complex128])
use_complex = int(dtype in [np.complex64, np.complex128])
# Allocate output matrix:
Ny, Nx = mat_shape
mult_gpu = gpuarray.empty(mat_shape, dtype)
# Get block/grid sizes:
dev = misc.get_current_device()
block_dim, grid_dim = misc.select_block_grid_sizes(dev, mat_shape)
gen_trapz2d_mult = _get_trapz2d_mult_kernel(use_double, use_complex)
gen_trapz2d_mult(mult_gpu, np.uint32(Ny), np.uint32(Nx),
block=block_dim,
grid=grid_dim)
return mult_gpu
Author: Brainiarc7 | Project: scikit-cuda | Lines: 41 | Source file: integrate.py
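Neither gen_trapz2d_mult variant above includes a usage example in its docstring, so here is a short CPU-only sketch of the property the generated matrix is meant to have: its entries are the 2-D trapezoidal weights (1/4 at the corners, 1/2 along the edges, 1 in the interior), so the dot product of the flattened weights with the flattened data equals nested trapezoidal integration with unit spacing. The helper names are made up for illustration and nothing below touches the GPU code.
import numpy as np

def trapz2d_weights(shape, dtype=np.float64):
    # CPU reference for the weight matrix that gen_trapz2d_mult builds on the GPU.
    w = np.ones(shape, dtype)
    w[0, :] *= 0.5; w[-1, :] *= 0.5    # first/last rows
    w[:, 0] *= 0.5; w[:, -1] *= 0.5    # first/last columns (corners become 0.25)
    return w

def trapz1d(v):
    # Trapezoidal rule with unit spacing.
    return 0.5*(v[0] + v[-1]) + v[1:-1].sum()

x = np.random.rand(6, 9)
w = trapz2d_weights(x.shape)
direct = np.dot(w.ravel(), x.ravel())
nested = trapz1d(np.array([trapz1d(row) for row in x]))
assert np.allclose(direct, nested)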
Example 6: diag
def diag(v_gpu):
"""
Construct a diagonal matrix.
Constructs a matrix in device memory whose diagonal elements
correspond to the elements in the specified array; all
non-diagonal elements are set to 0.
Parameters
----------
v_gpu : pycuda.gpuarray.GPUArray
Input array of length `n`.
Returns
-------
d_gpu : pycuda.gpuarray.GPUArray
Diagonal matrix of dimensions `[n, n]`.
Examples
--------
>>> import pycuda.driver as drv
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import linalg
>>> linalg.init()
>>> v = np.array([1, 2, 3, 4, 5, 6], np.float32)
>>> v_gpu = gpuarray.to_gpu(v)
>>> d_gpu = linalg.diag(v_gpu)
>>> np.all(d_gpu.get() == np.diag(v))
True
>>> v = np.array([1j, 2j, 3j, 4j, 5j, 6j], np.complex64)
>>> v_gpu = gpuarray.to_gpu(v)
>>> d_gpu = linalg.diag(v_gpu)
>>> np.all(d_gpu.get() == np.diag(v))
True
"""
if v_gpu.dtype not in [np.float32, np.float64, np.complex64,
np.complex128]:
raise ValueError('unrecognized type')
if len(v_gpu.shape) > 1:
raise ValueError('input array cannot be multidimensional')
use_double = int(v_gpu.dtype in [np.float64, np.complex128])
use_complex = int(v_gpu.dtype in [np.complex64, np.complex128])
# Initialize output matrix:
d_gpu = misc.zeros((v_gpu.size, v_gpu.size), v_gpu.dtype)
# Get block/grid sizes:
dev = misc.get_current_device()
block_dim, grid_dim = misc.select_block_grid_sizes(dev, d_gpu.shape)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir=None
diag_mod = \
SourceModule(diag_template.substitute(use_double=use_double,
use_complex=use_complex),
cache_dir=cache_dir)
diag = diag_mod.get_function("diag")
diag(v_gpu, d_gpu, np.uint32(v_gpu.size),
block=block_dim,
grid=grid_dim)
return d_gpu
Author: mforbes | Project: scikits.cuda | Lines: 70 | Source file: linalg.py
Example 7: conj
def conj(a_gpu, overwrite=True):
"""
Complex conjugate.
Compute the complex conjugate of the array in device memory.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Input array of shape `(m, n)`.
overwrite : bool
If true (default), save the result in the specified array.
If false, return the result in a newly allocated array.
Returns
-------
ac_gpu : pycuda.gpuarray.GPUArray
Conjugate of the input array. If `overwrite` is true, the
returned matrix is the same as the input array.
Examples
--------
>>> import pycuda.driver as drv
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import linalg
>>> linalg.init()
>>> a = np.array([[1+1j, 2-2j, 3+3j, 4-4j], [5+5j, 6-6j, 7+7j, 8-8j]], np.complex64)
>>> a_gpu = gpuarray.to_gpu(a)
>>> a_gpu = linalg.conj(a_gpu)
>>> np.all(a == np.conj(a_gpu.get()))
True
"""
# Don't attempt to process non-complex matrix types:
if a_gpu.dtype in [np.float32, np.float64]:
return
if a_gpu.dtype == np.complex64:
use_double = 0
elif a_gpu.dtype == np.complex128:
use_double = 1
else:
raise ValueError('unsupported type')
# Get block/grid sizes:
dev = misc.get_current_device()
block_dim, grid_dim = misc.select_block_grid_sizes(dev, a_gpu.shape)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir=None
conj_mod = \
SourceModule(conj_template.substitute(use_double=use_double),
cache_dir=cache_dir)
if overwrite:
conj_inplace = conj_mod.get_function("conj_inplace")
conj_inplace(a_gpu, np.uint32(a_gpu.size),
block=block_dim,
grid=grid_dim)
return a_gpu
else:
conj = conj_mod.get_function("conj")
ac_gpu = gpuarray.empty_like(a_gpu)
conj(a_gpu, ac_gpu, np.uint32(a_gpu.size),
block=block_dim,
grid=grid_dim)
return ac_gpu
Author: mforbes | Project: scikits.cuda | Lines: 71 | Source file: linalg.py
Example 8: hermitian
def hermitian(a_gpu):
"""
Hermitian (conjugate) matrix transpose.
Conjugate transpose a matrix in device memory and return an object
representing the transposed matrix.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Input matrix of shape `(m, n)`.
Returns
-------
at_gpu : pycuda.gpuarray.GPUArray
Transposed matrix of shape `(n, m)`.
Examples
--------
>>> import pycuda.autoinit
>>> import pycuda.driver as drv
>>> import pycuda.gpuarray as gpuarray
>>> import numpy as np
>>> import linalg
>>> linalg.init()
>>> a = np.array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], np.float32)
>>> a_gpu = gpuarray.to_gpu(a)
>>> at_gpu = linalg.hermitian(a_gpu)
>>> np.all(a.T == at_gpu.get())
True
>>> b = np.array([[1j, 2j, 3j, 4j, 5j, 6j], [7j, 8j, 9j, 10j, 11j, 12j]], np.complex64)
>>> b_gpu = gpuarray.to_gpu(b)
>>> bt_gpu = linalg.hermitian(b_gpu)
>>> np.all(np.conj(b.T) == bt_gpu.get())
True
"""
if a_gpu.dtype not in [np.float32, np.float64, np.complex64,
np.complex128]:
raise ValueError('unrecognized type')
use_double = int(a_gpu.dtype in [np.float64, np.complex128])
use_complex = int(a_gpu.dtype in [np.complex64, np.complex128])
# Get block/grid sizes:
dev = misc.get_current_device()
block_dim, grid_dim = misc.select_block_grid_sizes(dev, a_gpu.shape)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir=None
transpose_mod = \
SourceModule(transpose_template.substitute(use_double=use_double,
use_complex=use_complex,
hermitian=1,
cols=a_gpu.shape[1],
rows=a_gpu.shape[0]),
cache_dir=cache_dir)
transpose = transpose_mod.get_function("transpose")
at_gpu = gpuarray.empty(a_gpu.shape[::-1], a_gpu.dtype)
transpose(at_gpu, a_gpu, np.uint32(a_gpu.size),
block=block_dim,
grid=grid_dim)
return at_gpu
Author: mforbes | Project: scikits.cuda | Lines: 67 | Source file: linalg.py
Example 9: multiply
def multiply(x_gpu, y_gpu, overwrite=True):
"""
Multiply arguments element-wise.
Parameters
----------
x_gpu, y_gpu : pycuda.gpuarray.GPUArray
Input arrays to be multiplied.
overwrite : bool
If true (default), return the result in `y_gpu`.
If false, return the result in a newly allocated array.
Returns
-------
z_gpu : pycuda.gpuarray.GPUArray
The element-wise product of the input arrays.
Examples
--------
>>> import pycuda.autoinit
>>> import pycuda.gpuarray as gpuarray
>>> import numpy as np
>>> import linalg
>>> linalg.init()
>>> x = np.asarray(np.random.rand(4, 4), np.float32)
>>> y = np.asarray(np.random.rand(4, 4), np.float32)
>>> x_gpu = gpuarray.to_gpu(x)
>>> y_gpu = gpuarray.to_gpu(y)
>>> z_gpu = linalg.multiply(x_gpu, y_gpu)
>>> np.allclose(x*y, z_gpu.get())
True
"""
if x_gpu.shape != y_gpu.shape:
raise ValueError('input arrays must have the same shape')
if x_gpu.dtype not in [np.float32, np.float64, np.complex64,
np.complex128]:
raise ValueError('unrecognized type')
use_double = int(x_gpu.dtype in [np.float64, np.complex128])
use_complex = int(x_gpu.dtype in [np.complex64, np.complex128])
# Get block/grid sizes:
dev = misc.get_current_device()
block_dim, grid_dim = misc.select_block_grid_sizes(dev, x_gpu.shape)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir=None
multiply_mod = \
SourceModule(multiply_template.substitute(use_double=use_double,
use_complex=use_complex),
cache_dir=cache_dir)
if overwrite:
multiply = multiply_mod.get_function("multiply_inplace")
multiply(x_gpu, y_gpu, np.uint32(x_gpu.size),
block=block_dim,
grid=grid_dim)
return y_gpu
else:
multiply = multiply_mod.get_function("multiply")
z_gpu = gpuarray.empty(x_gpu.shape, x_gpu.dtype)
multiply(x_gpu, y_gpu, z_gpu, np.uint32(x_gpu.size),
block=block_dim,
grid=grid_dim)
return z_gpu
Author: mforbes | Project: scikits.cuda | Lines: 70 | Source file: linalg.py
Example 10: tril
def tril(a_gpu, overwrite=True, handle=None):
"""
Lower triangle of a matrix.
Return the lower triangle of a square matrix.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Input matrix of shape `(m, m)`
overwrite : boolean
If true (default), zero out the upper triangle of the matrix.
If false, return the result in a newly allocated matrix.
handle : int
CUBLAS context. If no context is specified, the default handle from
`scikits.cuda.misc._global_cublas_handle` is used.
Returns
-------
l_gpu : pycuda.gpuarray.GPUArray
The lower triangle of the original matrix.
Examples
--------
>>> import pycuda.driver as drv
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import linalg
>>> linalg.init()
>>> a = np.asarray(np.random.rand(4, 4), np.float32)
>>> a_gpu = gpuarray.to_gpu(a)
>>> l_gpu = linalg.tril(a_gpu, False)
>>> np.allclose(np.tril(a), l_gpu.get())
True
"""
if handle is None:
handle = misc._global_cublas_handle
if len(a_gpu.shape) != 2 or a_gpu.shape[0] != a_gpu.shape[1]:
raise ValueError('matrix must be square')
if a_gpu.dtype == np.float32:
swap_func = cublas.cublasSswap
copy_func = cublas.cublasScopy
use_double = 0
use_complex = 0
elif a_gpu.dtype == np.float64:
swap_func = cublas.cublasDswap
copy_func = cublas.cublasDcopy
use_double = 1
use_complex = 0
elif a_gpu.dtype == np.complex64:
swap_func = cublas.cublasCswap
copy_func = cublas.cublasCcopy
use_double = 0
use_complex = 1
elif a_gpu.dtype == np.complex128:
swap_func = cublas.cublasZswap
copy_func = cublas.cublasZcopy
use_double = 1
use_complex = 1
else:
raise ValueError('unrecognized type')
N = a_gpu.shape[0]
# Get block/grid sizes:
dev = misc.get_current_device()
block_dim, grid_dim = misc.select_block_grid_sizes(dev, a_gpu.shape)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir=None
tril_mod = \
SourceModule(tril_template.substitute(use_double=use_double,
use_complex=use_complex,
cols=N),
cache_dir=cache_dir)
tril = tril_mod.get_function("tril")
if not overwrite:
a_orig_gpu = gpuarray.empty(a_gpu.shape, a_gpu.dtype)
copy_func(handle, a_gpu.size, int(a_gpu.gpudata), 1, int(a_orig_gpu.gpudata), 1)
tril(a_gpu, np.uint32(a_gpu.size),
block=block_dim,
grid=grid_dim)
if overwrite:
return a_gpu
else:
# Restore original contents of a_gpu:
swap_func(handle, a_gpu.size, int(a_gpu.gpudata), 1, int(a_orig_gpu.gpudata), 1)
return a_orig_gpu
Author: mforbes | Project: scikits.cuda | Lines: 98 | Source file: linalg.py
Example 11: pinv
def pinv(a_gpu, rcond=1e-15):
"""
Moore-Penrose pseudoinverse.
Compute the Moore-Penrose pseudoinverse of the specified matrix.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Input matrix of shape `(m, n)`.
rcond : float
Singular values smaller than `rcond*max(singular_values)`
are set to zero.
Returns
-------
a_inv_gpu : pycuda.gpuarray.GPUArray
Pseudoinverse of input matrix.
Notes
-----
Double precision is only supported if the standard version of the
CULA Dense toolkit is installed.
This function destroys the contents of the input matrix.
If the input matrix is square, the pseudoinverse uses less memory.
Examples
--------
>>> import pycuda.driver as drv
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import linalg
>>> linalg.init()
>>> a = np.asarray(np.random.rand(8, 4), np.float32)
>>> a_gpu = gpuarray.to_gpu(a)
>>> a_inv_gpu = linalg.pinv(a_gpu)
>>> np.allclose(np.linalg.pinv(a), a_inv_gpu.get(), 1e-4)
True
>>> b = np.asarray(np.random.rand(8, 4)+1j*np.random.rand(8, 4), np.complex64)
>>> b_gpu = gpuarray.to_gpu(b)
>>> b_inv_gpu = linalg.pinv(b_gpu)
>>> np.allclose(np.linalg.pinv(b), b_inv_gpu.get(), 1e-4)
True
"""
if not _has_cula:
raise NotImplementedError('CULA not installed')
# Perform in-place SVD if the matrix is square to save memory:
if a_gpu.shape[0] == a_gpu.shape[1]:
u_gpu, s_gpu, vh_gpu = svd(a_gpu, 's', 'o')
else:
u_gpu, s_gpu, vh_gpu = svd(a_gpu, 's', 's')
# Get block/grid sizes; the number of threads per block is limited
# to 512 because the cutoff_invert_s kernel defined above uses too
# many registers to be invoked in 1024 threads per block (i.e., on
# GPUs with compute capability >= 2.x):
dev = misc.get_current_device()
max_threads_per_block = 512
block_dim, grid_dim = misc.select_block_grid_sizes(dev, s_gpu.shape, max_threads_per_block)
# Suppress very small singular values:
use_double = 1 if s_gpu.dtype == np.float64 else 0
cutoff_invert_s_mod = \
SourceModule(cutoff_invert_s_template.substitute(use_double=use_double))
cutoff_invert_s = \
cutoff_invert_s_mod.get_function('cutoff_invert_s')
cutoff_gpu = gpuarray.max(s_gpu)*rcond
cutoff_invert_s(s_gpu, cutoff_gpu,
np.uint32(s_gpu.size),
block=block_dim, grid=grid_dim)
# Compute the pseudoinverse without allocating a new diagonal matrix:
return dot(vh_gpu, dot_diag(s_gpu, u_gpu, 't'), 'c', 'c')
Author: mforbes | Project: scikits.cuda | Lines: 79 | Source file: linalg.py
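The cutoff_invert_s kernel used above is not reproduced on this page. As a CPU-side illustration of the step it performs (based on the surrounding code, this appears to be: invert the singular values above the rcond-based cutoff, zero the rest, then assemble V * diag(s_inv) * U^H), here is a NumPy-only sketch; it is an assumption about the kernel's behavior, not the kernel itself.
import numpy as np

def pinv_cpu(a, rcond=1e-15):
    # NumPy sketch of the pinv-from-SVD recipe used above.
    u, s, vh = np.linalg.svd(a, full_matrices=False)
    cutoff = rcond * s.max()
    # Counterpart of the cutoff_invert_s step: reciprocal above the
    # cutoff, zero at or below it.
    s_inv = np.where(s > cutoff, 1.0/s, 0.0)
    # a^+ = V * diag(s_inv) * U^H
    return (vh.conj().T * s_inv) @ u.conj().T

a = np.random.rand(8, 4).astype(np.float32)
assert np.allclose(pinv_cpu(a), np.linalg.pinv(a), rtol=1e-3, atol=1e-5)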
Example 12: diag
def diag(v_gpu):
"""
Construct a diagonal matrix if input array is one-dimensional,
or extracts diagonal entries of a two-dimensional array.
--- If input-array is one-dimensional:
Constructs a matrix in device memory whose diagonal elements
correspond to the elements in the specified array; all
non-diagonal elements are set to 0.
--- If input-array is two-dimensional:
Constructs an array in device memory whose elements
correspond to the elements along the main-diagonal of the specified
array.
Parameters
----------
v_gpu : pycuda.gpuarray.GPUArray
Input array: either a 1-D array of length `n` or a 2-D array of shape `(n, m)`.
Returns
-------
d_gpu : pycuda.gpuarray.GPUArray
--- If `v_gpu` is 1-dimensional with length `n`, the output is a
diagonal matrix of dimensions `[n, n]`.
--- If `v_gpu` is 2-dimensional with shape `(n, m)`, the output is an
array of length `min(n, m)`.
Examples
--------
>>> import pycuda.driver as drv
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import linalg
>>> linalg.init()
>>> v = np.array([1, 2, 3, 4, 5, 6], np.float32)
>>> v_gpu = gpuarray.to_gpu(v)
>>> d_gpu = linalg.diag(v_gpu)
>>> np.all(d_gpu.get() == np.diag(v))
True
>>> v = np.array([1j, 2j, 3j, 4j, 5j, 6j], np.complex64)
>>> v_gpu = gpuarray.to_gpu(v)
>>> d_gpu = linalg.diag(v_gpu)
>>> np.all(d_gpu.get() == np.diag(v))
True
>>> v = np.array([[1., 2., 3.],[4., 5., 6.]], np.float64)
>>> v_gpu = gpuarray.to_gpu(v)
>>> d_gpu = linalg.diag(v_gpu)
>>> d_gpu
array([ 1., 5.])
"""
if v_gpu.dtype not in [np.float32, np.float64, np.complex64,
np.complex128]:
raise ValueError('unrecognized type')
if (len(v_gpu.shape) > 1) and (len(v_gpu.shape) < 3):
# Since CUDA assumes that arrays are stored in column-major
# format, the input matrix is assumed to be transposed:
n, m = v_gpu.shape
square = (n == m)
# Allocate the output array
d_gpu = gpuarray.empty(min(m, n), v_gpu.dtype.type)
# Derive the kernel's C scalar type from the input dtype (a hard-coded
# "double" would only handle np.float64 input):
from pycuda.tools import dtype_to_ctype
ctype = dtype_to_ctype(v_gpu.dtype)
diag_kernel = el.ElementwiseKernel("%s *x, %s *y, int z" % (ctype, ctype), "y[i] = x[(z+1)*i]", "diakernel")
diag_kernel(v_gpu, d_gpu, max(m, n))
return d_gpu
elif len(v_gpu.shape) >= 3:
raise ValueError('input array cannot have greater than 2-dimensions')
use_double = int(v_gpu.dtype in [np.float64, np.complex128])
use_complex = int(v_gpu.dtype in [np.complex64, np.complex128])
# Initialize output matrix:
d_gpu = misc.zeros((v_gpu.size, v_gpu.size), v_gpu.dtype)
# Get block/grid sizes:
dev = misc.get_current_device()
block_dim, grid_dim = misc.select_block_grid_sizes(dev, d_gpu.shape)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir=None
diag_mod = \
SourceModule(diag_template.substitute(use_double=use_double,
use_complex=use_complex),
cache_dir=cache_dir)
diag = diag_mod.get_function("diag")
diag(v_gpu, d_gpu, np.uint32(v_gpu.size),
block=block_dim,
grid=grid_dim)
return d_gpu
Author: stevertaylor | Project: scikits.cuda | Lines: 98 | Source file: linalg.py
Example 13: sici
def sici(x_gpu):
"""
Sine/Cosine integral.
Computes the sine and cosine integral of every element in the
input matrix.
Parameters
----------
x_gpu : GPUArray
Input matrix of shape `(m, n)`.
Returns
-------
(si_gpu, ci_gpu) : tuple of GPUArrays
Tuple of GPUarrays containing the sine integrals and cosine
integrals of the entries of `x_gpu`.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import scipy.special
>>> import special
>>> x = np.array([[1, 2], [3, 4]], np.float32)
>>> x_gpu = gpuarray.to_gpu(x)
>>> (si_gpu, ci_gpu) = sici(x_gpu)
>>> (si, ci) = scipy.special.sici(x)
>>> np.allclose(si, si_gpu.get())
True
>>> np.allclose(ci, ci_gpu.get())
True
"""
if x_gpu.dtype == np.float32:
use_double = 0
elif x_gpu.dtype == np.float64:
use_double = 1
else:
raise ValueError('unsupported type')
# Get block/grid sizes:
dev = misc.get_current_device()
block_dim, grid_dim = misc.select_block_grid_sizes(dev, x_gpu.shape)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir=None
sici_mod = \
SourceModule(sici_template.substitute(use_double=use_double),
cache_dir=cache_dir,
options=["-I", install_headers])
sici_func = sici_mod.get_function("sici_array")
si_gpu = gpuarray.empty_like(x_gpu)
ci_gpu = gpuarray.empty_like(x_gpu)
sici_func(x_gpu, si_gpu, ci_gpu,
np.uint32(x_gpu.size),
block=block_dim,
grid=grid_dim)
return (si_gpu, ci_gpu)
Author: alemagnani | Project: scikits.cuda | Lines: 63 | Source file: special.py
Example 14: pinv
def pinv(a_gpu, dev, rcond=1e-15):
"""
Moore-Penrose pseudoinverse.
Compute the Moore-Penrose pseudoinverse of the specified matrix.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Input matrix of shape `(m, n)`.
dev : pycuda.driver.Device
Device object to be used.
rcond : float
Singular values smaller than `rcond*max(singular_values)`
are set to zero.
Returns
-------
a_inv_gpu : pycuda.gpuarray.GPUArray
Pseudoinverse of input matrix.
Examples
--------
>>> import pycuda.driver as drv
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import linalg
>>> linalg.init()
>>> a = np.asarray(np.random.rand(8, 4), np.float32)
>>> a_gpu = gpuarray.to_gpu(a)
>>> a_inv_gpu = pinv(a_gpu, pycuda.autoinit.device)
>>> np.allclose(np.linalg.pinv(a), a_inv_gpu.get(), 1e-4)
True
>>> b = np.asarray(np.random.rand(8, 4)+1j*np.random.rand(8, 4), np.complex64)
>>> b_gpu = gpuarray.to_gpu(b)
>>> b_inv_gpu = pinv(b_gpu, pycuda.autoinit.device)
>>> np.allclose(np.linalg.pinv(b), b_inv_gpu.get(), 1e-4)
True
"""
# Check input dtype because the SVD can only be computed in single
# precision:
if a_gpu.dtype not in [np.float32, np.complex64]:
raise ValueError('unsupported type')
# Compute SVD:
u_gpu, s_gpu, vh_gpu = svd(a_gpu, 0)
uh_gpu = transpose(u_gpu, dev)
# Get block/grid sizes:
max_threads_per_block, max_block_dim, max_grid_dim = get_dev_attrs(dev)
block_dim, grid_dim = select_block_grid_sizes(dev, s_gpu.shape)
max_blocks_per_grid = max(max_grid_dim)
# Suppress very small singular values:
cutoff_invert_s_mod = \
SourceModule(cutoff_invert_s_mod_template.substitute(
max_threads_per_block=max_threads_per_block,
max_blocks_per_grid=max_blocks_per_grid))
cutoff_invert_s = \
cutoff_invert_s_mod.get_function('cutoff_invert_s')
cutoff_gpu = gpuarray.max(s_gpu)*rcond
cutoff_invert_s(s_gpu.gpudata, cutoff_gpu.gpudata,
np.uint32(s_gpu.size),
block=block_dim, grid=grid_dim)
# The singular values must have the same data type as uh_gpu before
# forming the diagonal matrix:
if s_gpu.dtype == uh_gpu.dtype:
s_diag_gpu = diag(s_gpu, dev)
else:
s_diag_gpu = diag(s_gpu.astype(uh_gpu.dtype), dev)
# Finish pinv computation:
v_gpu = transpose(vh_gpu, dev)
suh_gpu = dot(s_diag_gpu, uh_gpu)
return dot(v_gpu, suh_gpu)
Author: aeweiwi | Project: scikits.cuda | Lines: 78 | Source file: linalg.py
Example 15: conj
def conj(a_gpu, dev):
"""
Complex conjugate.
Compute the complex conjugate of the matrix in device memory.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Input matrix of shape `(m, n)`.
dev : pycuda.driver.Device
Device object to be used.
Notes
-----
The input matrix is modified in place.
This function assumes that the input matrix contains complex
numbers; undefined behavior may occur for other types.
Examples
--------
>>> import pycuda.driver as drv
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import linalg
>>> linalg.init()
>>> a = np.array([[1+1j, 2-2j, 3+3j, 4-4j], [5+5j, 6-6j, 7+7j, 8-8j]], np.complex64)
>>> a_gpu = gpuarray.to_gpu(a)
>>> conj(a_gpu, pycuda.autoinit.device)
>>> np.all(a == np.conj(a_gpu.get()))
True
"""
# Don't attempt to process non-complex matrix types:
if a_gpu.dtype in [np.float32, np.float64]:
return
if a_gpu.dtype == np.complex64:
use_double = 0
elif a_gpu.dtype == np.complex128:
use_double = 1
else:
raise ValueError('unsupported type')
# Get block/grid sizes:
max_threads_per_block, max_block_dim, max_grid_dim = get_dev_attrs(dev)
block_dim, grid_dim = select_block_grid_sizes(dev, a_gpu.shape)
max_blocks_per_grid = max(max_grid_dim)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir=None
conj_mod = \
SourceModule(conj_mod_template.substitute(use_double=use_double,
max_threads_per_block=max_threads_per_block,
max_blocks_per_grid=max_blocks_per_grid),
cache_dir=cache_dir)
conj = conj_mod.get_function("conj")
conj(a_gpu.gpudata,
np.uint32(a_gpu.size),
block=block_dim,
grid=grid_dim)
Author: aeweiwi | Project: scikits.cuda | Lines: 66 | Source file: linalg.py
Example 16: transpose
def transpose(a_gpu, dev):
"""
Matrix transpose.
Transpose a matrix in device memory and return an object
representing the transposed matrix.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Input matrix of shape `(m, n)`.
dev : pycuda.driver.Device
Device object to be used.
Returns
-------
at_gpu : pycuda.gpuarray.GPUArray
Transposed matrix of shape `(n, m)`.
Notes
-----
If the specified matrix type is complex, the function will return
the Hermitian of the input matrix.
Examples
--------
>>> import pycuda.autoinit
>>> import pycuda.driver as drv
>>> import pycuda.gpuarray as gpuarray
>>> import numpy as np
>>> import linalg
>>> linalg.init()
>>> a = np.array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], np.float32)
>>> a_gpu = gpuarray.to_gpu(a)
>>> at_gpu = transpose(a_gpu, pycuda.autoinit.device)
>>> np.all(a.T == at_gpu.get())
True
>>> b = np.array([[1j, 2j, 3j, 4j, 5j, 6j], [7j, 8j, 9j, 10j, 11j, 12j]], np.complex64)
>>> b_gpu = gpuarray.to_gpu(b)
>>> bt_gpu = transpose(b_gpu, pycuda.autoinit.device)
>>> np.all(np.conj(b.T) == bt_gpu.get())
True
"""
if a_gpu.dtype not in [np.float32, np.float64, np.complex64,
np.complex128]:
raise ValueError('unrecognized type')
use_double = int(a_gpu.dtype in [np.float64, np.complex128])
use_complex = int(a_gpu.dtype in [np.complex64, np.complex128])
# Get block/grid sizes:
max_threads_per_block, max_block_dim, max_grid_dim = get_dev_attrs(dev)
block_dim, grid_dim = select_block_grid_sizes(dev, a_gpu.shape)
max_blocks_per_grid = max(max_grid_dim)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir=None
transpose_mod = \
SourceModule(transpose_mod_template.substitute(use_double=use_double,
use_complex=use_complex,
max_threads_per_block=max_threads_per_block,
max_blocks_per_grid=max_blocks_per_grid,
cols=a_gpu.shape[1],
rows=a_gpu.shape[0]),
cache_dir=cache_dir)
transpose = transpose_mod.get_function("transpose")
at_gpu = gpuarray.empty(a_gpu.shape[::-1], a_gpu.dtype)
transpose(at_gpu.gpudata, a_gpu.gpudata,
np.uint32(a_gpu.size),
block=block_dim,
grid=grid_dim)
return at_gpu
Author: aeweiwi | Project: scikits.cuda | Lines: 79 | Source file: linalg.py
Note: The misc.select_block_grid_sizes examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by their original authors. Copyright of the source code remains with those authors; refer to each project's license before redistributing or reusing it. Do not republish this article without permission.