• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    公众号

Python mpi4py.MPI类代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中mpi4py.MPI的典型用法代码示例。如果您正苦于以下问题:Python MPI类的具体用法?Python MPI怎么用?Python MPI使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。



在下文中一共展示了MPI类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: testHandleValue

 def testHandleValue(self):
     """The integer read at an object's address equals its MPI handle."""
     # Map handle width to the matching fixed-width ctypes unsigned type.
     int_types = {ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32,
                  ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64}
     for obj in self.objects:
         uint_t = int_types[MPI._sizeof(obj)]
         raw = uint_t.from_address(MPI._addressof(obj))
         self.assertEqual(raw.value, MPI._handleof(obj))
开发者ID:benkirk,项目名称:mpi_playground,代码行数:7,代码来源:test_ctypes.py


示例2: testAHandleOf

 def testAHandleOf(self):
     """MPI._handleof works for handle objects but raises for Status."""
     for obj in self.objects:
         if isinstance(obj, MPI.Status):
             # Status carries no integer handle; accessing one must raise.
             self.assertRaises(NotImplementedError, MPI._handleof, obj)
         else:
             MPI._handleof(obj)  # must simply succeed
开发者ID:benkirk,项目名称:mpi_playground,代码行数:7,代码来源:test_objmodel.py


示例3: testHandleValue

 def testHandleValue(self):
     """Value read through a cffi cast of the address equals the handle."""
     ffi = cffi.FFI()
     # Pick the unsigned C type whose width matches this MPI handle type.
     width_to_ctype = {ffi.sizeof('uint32_t'): 'uint32_t',
                       ffi.sizeof('uint64_t'): 'uint64_t'}
     for obj in self.objects:
         ctype = width_to_ctype[MPI._sizeof(obj)]
         value = ffi.cast(ctype + '*', MPI._addressof(obj))[0]
         self.assertEqual(value, MPI._handleof(obj))
开发者ID:benkirk,项目名称:mpi_playground,代码行数:8,代码来源:test_cffi.py


示例4: ncmpi_open

def ncmpi_open(name):
    """Open a parallel-netCDF file read-only on MPI.COMM_WORLD.

    Parameters:
        name -- path of the file to open (str; encoded to UTF-8 bytes for
            the C library on Python 3).

    Returns:
        The integer ncid handle produced by the C ncmpi_open() call.
    """
    import sys
    # The underlying C routine expects a byte string on Python 3
    # (same guard as the newer version of this function in this file).
    if sys.version_info >= (3, 0, 0):
        name = bytes(name, 'utf-8')
    comm_ptr = MPI._addressof(MPI.COMM_WORLD)
    comm_val = MPI_Comm.from_address(comm_ptr)
    info_ptr = MPI._addressof(MPI.INFO_NULL)
    # BUG FIX: the info handle must be read as MPI_Info, not MPI_Comm —
    # the two handle types need not share a representation.
    info_val = MPI_Info.from_address(info_ptr)
    ncid = c_int()
    retval = _ncmpi_open(comm_val, name, NC_NOWRITE, info_val, byref(ncid))
    errcheck(retval)
    return ncid.value
开发者ID:abhinavvishnu,项目名称:matex,代码行数:9,代码来源:pnetcdf.py


示例5: testHandleAdress

 def testHandleAdress(self):
     """Copying the raw handle into a fresh object makes the two equal."""
     # Choose the ctypes type whose size matches this handle type.
     handle_types = {ctypes.sizeof(ctypes.c_int): ctypes.c_int,
                     ctypes.sizeof(ctypes.c_void_p): ctypes.c_void_p}
     for obj in self.objects:
         ctype = handle_types[MPI._sizeof(obj)]
         fresh = type(obj)()
         src = ctype.from_address(MPI._addressof(obj))
         dst = ctype.from_address(MPI._addressof(fresh))
         dst.value = src.value
         self.assertEqual(obj, fresh)
开发者ID:benkirk,项目名称:mpi_playground,代码行数:11,代码来源:test_ctypes.py


示例6: check_mpi

def check_mpi():
    """Verify mpi4py is configured against the Open MPI 'mpiexec' on PATH.

    Raises:
        ImportError: if 'mpiexec' is missing, if mpi4py's compiler wrappers
            live in a different installation than 'mpiexec', if the vendor
            is not Open MPI, or if the vendor version does not appear in
            the 'mpiexec' path.
    """
    mpiexec = distutils.spawn.find_executable("mpiexec")
    if mpiexec is None:
        # BUG FIX: find_executable returns None when the program is absent;
        # os.path.split(None) previously raised a confusing TypeError.
        raise ImportError("'mpiexec' was not found on your PATH.")
    mpiexec_path, _ = os.path.split(mpiexec)
    for executable, path in mpi4py.get_config().items():
        # Only the compiler wrappers carry an installation path to compare.
        if executable not in ['mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort']:
            continue
        if mpiexec_path not in path:
            raise ImportError("mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n".format(mpiexec_path=mpiexec_path, mpi4py_config=mpi4py.get_config()))
    if 'Open MPI' not in MPI.get_vendor():
        raise ImportError("mpi4py must have been installed against Open MPI in order for StructOpt to function correctly.")
    vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]])
    if vendor_number not in mpiexec_path:
        raise ImportError("The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}".format(MPI.get_vendor(), mpiexec_path))
开发者ID:jjmaldonis,项目名称:mpi-parallelization,代码行数:12,代码来源:check_mpi.py


示例7: ncmpi_open

def ncmpi_open(name):
    """Open the named parallel-netCDF file read-only and return its ncid."""
    # The underlying C routine wants a byte string on Python 3.
    if sys.version_info >= (3, 0, 0):
        name = bytes(name, 'utf-8')
    # Read the raw MPI handles for COMM_WORLD and INFO_NULL in place.
    comm_handle = MPI_Comm.from_address(MPI._addressof(MPI.COMM_WORLD))
    info_handle = MPI_Info.from_address(MPI._addressof(MPI.INFO_NULL))
    ncid = c_int()
    retval = _ncmpi_open(comm_handle, name, NC_NOWRITE, info_handle,
                         byref(ncid))
    errcheck(retval)
    return ncid.value
开发者ID:abhinavvishnu,项目名称:matex,代码行数:13,代码来源:pnetcdf.py


示例8: __init__

 def __init__(self, comm=None):
     """Capture the raw MPI handle of *comm* (COMM_WORLD when omitted)."""
     if comm is None:
         # Only reached when unpickling, where no communicator is supplied.
         comm = MPI.COMM_WORLD
     address = MPI._addressof(comm)
     self.value = self.dtype.from_address(address)
开发者ID:opesci,项目名称:devito,代码行数:7,代码来源:distributed.py


示例9: main

def main(split_into=2, nloops=3):
    """Spawn *split_into* worker communicators, repeated *nloops* times."""
    world = MPI.COMM_WORLD
    rank = world.Get_rank()
    size = world.Get_size()
    if size < split_into:
        raise ValueError("The number of cores passed to 'mpiexec' must be greater than the number of desired communicators.")
    cores_per_comm = size // split_into

    # Fake input data for each of the spawned communicators.  Non-Open-MPI
    # vendors additionally receive a color argument.
    multipliers = [i + 1 for i in range(split_into)]
    if 'Open MPI' in MPI.get_vendor():
        data_by_process = [(str(multipliers[i]),) for i in range(split_into)]
    else:
        colors = [(i + 1) // split_into for i in range(split_into)]
        data_by_process = [(str(multipliers[i]), str(colors[i]))
                           for i in range(split_into)]

    if rank == 0:
        print("At each iteration we will spawn {} workers with {} cores each out of a total of {} cores.".format(split_into, cores_per_comm, size))
        print("Those {} split communicators will get the following as input:".format(split_into))
        for i in range(split_into):
            print("    Communicator {}: {}".format(i, data_by_process[i]))

        for i in range(nloops):
            print("Iteration {}...".format(i))
            spawn_multiple(split_into, cores_per_comm, data_by_process)
开发者ID:jjmaldonis,项目名称:mpi-parallelization,代码行数:26,代码来源:spawn_multiple_loop.py


示例10: getlibraryinfo

def getlibraryinfo():
    """Return a human-readable description of the MPI library in use."""
    from mpi4py import MPI
    description = "MPI %d.%d" % MPI.Get_version()
    vendor, release = MPI.get_vendor()
    if vendor != "unknown":
        description += " (%s %s)" % (vendor, '%d.%d.%d' % release)
    return description
开发者ID:erdc-cm,项目名称:mpi4py,代码行数:7,代码来源:runtests.py


示例11: ensure_mpd_is_running

def ensure_mpd_is_running():
    """Start a background MPICH2 'mpd' daemon if one is not running yet."""
    if is_mpd_running():
        return
    vendor, _version = MPI.get_vendor()
    if vendor != "MPICH2":
        return
    try:
        subprocess.Popen(["nohup", "mpd"], close_fds=True)
    except OSError:
        # Best effort: if mpd cannot be launched, let MPI itself report it.
        pass
开发者ID:vdhelm,项目名称:amuse,代码行数:8,代码来源:background_test.py


示例12: setup_md

 def setup_md(self, icomm_grid, xyzL, xyz_orig):
     """Initialise the MD side of the coupled simulation.

     NOTE(review): the previous docstring described a different signature
     (dt / real / imag); rewritten to match the actual parameters.

     Keyword arguments:
     icomm_grid -- MD grid communicator; its raw MPI handle is forwarded
         to the underlying library via MPI._handleof.
     xyzL -- presumably the domain extents per axis -- confirm with caller.
     xyz_orig -- presumably the domain origin -- confirm with caller.
     """
     self.py_setup_md(MPI._handleof(icomm_grid), xyzL, xyz_orig)
开发者ID:Crompulence,项目名称:cpl-library,代码行数:8,代码来源:cplpy.py


示例13: is_mpd_running

def is_mpd_running():
    """Return True if an mpd daemon is reachable (always True off MPICH2)."""
    vendor, _version = MPI.get_vendor()
    if vendor != 'MPICH2':
        return True
    # 'mpdtrace' exits with 255 when no daemon is running.
    proc = subprocess.Popen(['mpdtrace'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.communicate()
    return proc.returncode != 255
开发者ID:Ingwar,项目名称:amuse,代码行数:8,代码来源:background_test.py


示例14: _buffer_from_gpuarray

    def _buffer_from_gpuarray(self, array):
        """Return an MPI-usable buffer wrapping *array*'s device memory."""
        data = array.gpudata
        # `gpudata` may be a DeviceAllocation or a plain integer pointer.
        if isinstance(data, cuda.DeviceAllocation):
            # DeviceAllocation can expose its memory as a buffer directly.
            return data.as_buffer(array.nbytes)
        # Fall back to constructing a buffer from the raw device pointer.
        return MPI.make_buffer(array.gpudata, array.nbytes)
开发者ID:shwina,项目名称:gpuDA,代码行数:9,代码来源:gpuda.py


示例15: send

def send(data, data_package, dest=None, gpu_direct=True):
	"""Send *data* (described by *data_package*) to rank *dest*.

	Sends the data_package metadata first, then the payload with a
	non-blocking Isend.  GPU-resident data is sent either directly from
	device memory (gpu_direct=True) or staged through a host buffer.
	Returns the Isend request, or None when nothing was sent.
	"""
	global s_requests
	tag = 52
	dp = data_package
	# send data_package
	send_data_package(dp, dest=dest, tag=tag)

	bytes = dp.data_bytes
	memory_type = dp.memory_type
	
	if log_type in ['time','all']: st = time.time()

	flag = False
	request = None
	if memory_type == 'devptr': # data in the GPU
		if gpu_direct: # want to use GPU direct
			# Wrap the raw device pointer so MPI can read device memory.
			devptr = data
			buf = MPI.make_buffer(devptr.__int__(), bytes)
			ctx.synchronize()
			request = comm.Isend([buf, MPI.BYTE], dest=dest, tag=57)
			if VIVALDI_BLOCKING: MPI.Request.Wait(request)
			# Keep (request, buf, devptr) alive until the send completes.
			s_requests.append((request, buf, devptr))
			flag = True
		else:# not want to use GPU direct
		
			# copy to CPU
			shape = dp.data_memory_shape
			dtype = dp.data_contents_memory_dtype
			buf = numpy.empty(shape, dtype=dtype)
			cuda.memcpy_dtoh_async(buf, data, stream=stream_list[1])

			request = comm.Isend(buf, dest=dest, tag=57)
			if VIVALDI_BLOCKING: MPI.Request.Wait(request)
			s_requests.append((request, buf, None))
			
	else: # data in the CPU
		# want to use GPU direct, not exist case
		# not want to use GPU direct
		if dp.data_dtype == numpy.ndarray: 
			request = comm.Isend(data, dest=dest, tag=57)
			if VIVALDI_BLOCKING: MPI.Request.Wait(request)
			s_requests.append((request, data, None))
			
	if log_type in ['time','all']:
		u = dp.unique_id
		bytes = dp.data_bytes
		# NOTE(review): the start time above comes from time.time() while
		# the end time uses MPI.Wtime(); the two clocks need not share an
		# epoch, so this elapsed time looks wrong -- confirm and use one
		# clock consistently.
		t = MPI.Wtime()-st
		ms = 1000*t
		bw = bytes/GIGA/t
	
		if flag:
			log("rank%d, \"%s\", u=%d, from rank%d to rank%d GPU direct send, Bytes: %dMB, time: %.3f ms, speed: %.3f GByte/sec"%(rank, name, u, rank, dest, bytes/MEGA, ms, bw),'time', log_type)
		else:
			log("rank%d, \"%s\", u=%d, from rank%d to rank%d MPI data transfer, Bytes: %dMB, time: %.3f ms, speed: %.3f GByte/sec"%(rank, name, u, rank, dest, bytes/MEGA, ms, bw),'time', log_type)
	
	return request
开发者ID:Anukura,项目名称:Vivaldi,代码行数:56,代码来源:GPU_unit.py


示例16: set_default_mpi_parameters

def set_default_mpi_parameters(parameters):
    """Fill in default MPI settings and rank/size info in parameters.mpi.

    Scans the relaxation and fitness modules for the 'use_mpi4py' flag;
    when any module requests mpi4py, validates the mpi4py / Open MPI
    installation and records COMM_WORLD's rank and size.  Otherwise falls
    back to serial defaults (rank 0, 1 core).

    Returns the (mutated) parameters object.
    """
    # BUG FIX: start from False -- this was previously initialised to True,
    # which made the serial fallback branch below unreachable and forced
    # mpi4py to be installed even when no module asked for it.
    use_mpi4py = False
    if 'relaxations' in parameters:
        for module in parameters.relaxations:
            parameters.relaxations[module].setdefault('use_mpi4py', False)
            parameters.relaxations[module].setdefault('MPMD', 0)
            if parameters.relaxations[module].use_mpi4py:
                use_mpi4py = True
    if 'fitnesses' in parameters:
        for module in parameters.fitnesses:
            parameters.fitnesses[module].setdefault('use_mpi4py', False)
            parameters.fitnesses[module].setdefault('MPMD', 0)
            if parameters.fitnesses[module].use_mpi4py:
                use_mpi4py = True

    parameters.setdefault('mpi', {})
    if use_mpi4py:
        try:
            import mpi4py
        except ImportError:
            raise ImportError("mpi4py must be installed to use StructOpt.")
        mpiexec = distutils.spawn.find_executable("mpiexec")
        if mpiexec is None:
            # find_executable returns None when mpiexec is absent; fail
            # with a clear message instead of a TypeError in path.split.
            raise ImportError("'mpiexec' was not found on your PATH.")
        mpiexec_path, _ = os.path.split(mpiexec)
        for executable, path in mpi4py.get_config().items():
            if executable not in ['mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort']:
                continue
            if mpiexec_path not in path:
                raise ImportError("mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n".format(mpiexec_path=mpiexec_path, mpi4py_config=mpi4py.get_config()))
        from mpi4py import MPI
        if 'Open MPI' not in MPI.get_vendor():
            raise ImportError("mpi4py must have been installed against Open MPI in order for StructOpt to function correctly.")
        vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]])
        if vendor_number not in mpiexec_path:
            raise ImportError("The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}".format(MPI.get_vendor(), mpiexec_path))

        parameters.mpi.rank = MPI.COMM_WORLD.Get_rank()
        parameters.mpi.ncores = MPI.COMM_WORLD.Get_size()
    else:
        # Serial run: no mpi4py required.
        parameters.mpi.rank = 0
        parameters.mpi.ncores = 1

    return parameters
开发者ID:uw-cmg,项目名称:StructOpt,代码行数:42,代码来源:parameters.py


示例17: init

    def init(self, calling_realm):
        """Initialise CPL for *calling_realm* and return its communicator.

        The C library hands back an integer communicator handle; this is
        wrapped into a fresh mpi4py Intracomm object.
        """
        # Pick the ctypes type that matches the size of an MPI_Comm handle.
        if MPI._sizeof(MPI.Comm) == ctypes.sizeof(c_int):
            MPI_Comm = c_int
        else:
            MPI_Comm = c_void_p

        # Ask the library to create the realm communicator.
        realm_comm = c_int()
        self._py_init(calling_realm, byref(realm_comm))

        # Overwrite the handle inside a template Intracomm with the new one.
        newcomm = MPI.Intracomm()
        handle = MPI_Comm.from_address(MPI._addressof(newcomm))
        handle.value = realm_comm.value

        return newcomm
开发者ID:Crompulence,项目名称:cpl-library,代码行数:20,代码来源:cplpy.py


示例18: testHandleAddress

 def testHandleAddress(self):
     """Copying raw handles via cffi pointers makes objects compare equal."""
     ffi = cffi.FFI()
     c_types = {ffi.sizeof('int'): 'int',
                ffi.sizeof('void*'): 'void*'}
     typename = lambda t: t.__name__.rsplit('.', 1)[-1]
     # Declare one typedef per MPI handle type, sized to this MPI build.
     for tp in self.mpitypes:
         ffi.cdef("typedef %s %s;" % (c_types[MPI._sizeof(tp)],
                                      'MPI_' + typename(tp)))
     for obj in self.objects:
         if isinstance(obj, MPI.Comm):
             mpi_t = 'MPI_Comm'
         else:
             mpi_t = 'MPI_' + typename(type(obj))
         fresh = type(obj)()
         src = ffi.cast(mpi_t + '*', MPI._addressof(obj))
         dst = ffi.cast(mpi_t + '*', MPI._addressof(fresh))
         dst[0] = src[0]
         self.assertEqual(obj, fresh)
开发者ID:benkirk,项目名称:mpi_playground,代码行数:20,代码来源:test_cffi.py


示例19: testGetEnvelope

 def testGetEnvelope(self):
     """Named predefined datatypes must expose an empty, named envelope."""
     for dtype in datatypes:
         try:
             envelope = dtype.Get_envelope()
         except NotImplementedError:
             # BUG FIX: a bare 'return' silently passed the test when
             # Get_envelope is unimplemented; mark the test as skipped
             # instead (matches the newer revision of this test).
             self.skipTest('mpi-type-get_envelope')
         # LAM/MPI is known to misreport COMPLEX types; skip them there.
         if ('LAM/MPI' == MPI.get_vendor()[0] and
             "COMPLEX" in dtype.name): continue
         ni, na, nd, combiner = envelope
         self.assertEqual(combiner, MPI.COMBINER_NAMED)
         self.assertEqual(ni, 0)
         self.assertEqual(na, 0)
         self.assertEqual(nd, 0)
开发者ID:erdc-cm,项目名称:mpi4py,代码行数:13,代码来源:test_datatype.py


示例20: testGetEnvelope

 def testGetEnvelope(self):
     """Every named predefined datatype reports an empty, named envelope."""
     for dtype in datatypes:
         try:
             envelope = dtype.Get_envelope()
         except NotImplementedError:
             self.skipTest('mpi-type-get_envelope')
         # LAM/MPI is known to misreport COMPLEX types; skip them there.
         if ("COMPLEX" in dtype.name and
             'LAM/MPI' == MPI.get_vendor()[0]):
             continue
         ni, na, nd, combiner = envelope
         self.assertEqual(combiner, MPI.COMBINER_NAMED)
         for count in (ni, na, nd):
             self.assertEqual(count, 0)
         # The cached properties must agree with the freshly queried values.
         self.assertEqual(dtype.envelope, envelope)
         self.assertEqual(dtype.combiner, combiner)
         self.assertTrue(dtype.is_named)
         self.assertTrue(dtype.is_predefined)
         # Decoding a named type must yield the very same object.
         self.assertTrue(dtype.decode() is dtype)
开发者ID:benkirk,项目名称:mpi_playground,代码行数:19,代码来源:test_datatype.py



注:本文中的mpi4py.MPI类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python mpiunittest.main函数代码示例发布时间:2022-05-27
下一篇:
Python mpi.mpi_finalize函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap