
C++ MPI_Address Function Code Examples


This article collects typical usage examples of the C++ MPI_Address function, gathered from open-source projects. If you are unsure what MPI_Address does or how to call it, the examples below should help.

The 20 MPI_Address code examples that follow are ordered by popularity.
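Note that MPI_Address, MPI_Type_struct, and the MPI_UB marker that appear throughout these examples were deprecated in MPI-2 and removed in MPI-3.0; their modern replacements are MPI_Get_address, MPI_Type_create_struct, and MPI_Type_create_resized. For reference, here is a minimal sketch of the same pattern in the modern API. The Particle struct and its fields are illustrative inventions, not taken from any example below:

#include <mpi.h>

// Illustrative struct; not taken from any example below.
struct Particle {
    double x[3];   // position
    int    id;     // particle identifier
};

// Builds a derived datatype for Particle using the MPI-3 API; the
// deprecated calls used throughout this article are noted in comments.
void build_particle_type(MPI_Datatype* newtype) {
    Particle     p;
    MPI_Aint     base, disp[2];
    int          blocklen[2] = {3, 1};
    MPI_Datatype types[2]    = {MPI_DOUBLE, MPI_INT};

    MPI_Get_address(&p, &base);            // was: MPI_Address(&p, &base)
    MPI_Get_address(&p.x[0], &disp[0]);
    MPI_Get_address(&p.id,   &disp[1]);
    disp[0] -= base;                       // make displacements relative to the struct
    disp[1] -= base;

    MPI_Datatype tmp;
    MPI_Type_create_struct(2, blocklen, disp, types, &tmp);   // was: MPI_Type_struct
    // Pin the extent to sizeof(Particle) so contiguous arrays work;
    // the classic idiom appended an MPI_UB marker instead.
    MPI_Type_create_resized(tmp, 0, sizeof(Particle), newtype);
    MPI_Type_free(&tmp);
    MPI_Type_commit(newtype);
}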

Example 1: MPI_Address

 void peano::applications::faxen::repositories::FaxenBatchJobRepositoryStatePacked::initDatatype() {
    const int Attributes = 2;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //action
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //action
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    FaxenBatchJobRepositoryStatePacked dummyFaxenBatchJobRepositoryStatePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[0]._persistentRecords._action))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[1]._persistentRecords._action))), 		&disp[1] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &FaxenBatchJobRepositoryStatePacked::Datatype );
    MPI_Type_commit( &FaxenBatchJobRepositoryStatePacked::Datatype );
    
 }
Author: p-hoffmann, Project: madpac, Lines: 30, Source: FaxenBatchJobRepositoryState.cpp
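A note on the pattern above, which recurs in all the Peano-generated examples in this article: two dummy records are allocated as an array, the displacement of each field is taken on element [0] relative to the array base, and the address of the first field of element [1] supplies the final MPI_UB entry. Because element [1] starts exactly one record size past element [0], this pins the datatype's extent to the true size of one record (including any trailing padding), so contiguous arrays of records can be sent with a single count.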


Example 2: MPI_Address

 void peano::applications::latticeboltzmann::blocklatticeboltzmann::repositories::BlockLatticeBoltzmannBatchJobRepositoryState::initDatatype() {
    const int Attributes = 3;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //action
       MPI_CHAR,		 //reduceState
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //action
       1,		 //reduceState
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    BlockLatticeBoltzmannBatchJobRepositoryState dummyBlockLatticeBoltzmannBatchJobRepositoryState[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]._persistentRecords._action))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]._persistentRecords._reduceState))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[1]._persistentRecords._action))), 		&disp[2] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &BlockLatticeBoltzmannBatchJobRepositoryState::Datatype );
    MPI_Type_commit( &BlockLatticeBoltzmannBatchJobRepositoryState::Datatype );
    
 }
Author: p-hoffmann, Project: madpac, Lines: 33, Source: BlockLatticeBoltzmannBatchJobRepositoryState.cpp


Example 3: mytype_commit

void mytype_commit(struct mystruct value){

  MPI_Aint indices[3];
  int blocklens[3];
  MPI_Datatype old_types[3];

  old_types[0] = MPI_CHAR;
  old_types[1] = MPI_INT;
  old_types[2] = MPI_DOUBLE;

  blocklens[0] = 1;
  blocklens[1] = 3;
  blocklens[2] = 5;

  MPI_Address(&value.ch, &indices[0]);
  MPI_Address(&value.a, &indices[1]);
  MPI_Address(&value.x, &indices[2]);

  indices[2] = indices[2] - indices[0];
  indices[1] = indices[1] - indices[0];
  indices[0] = 0;

  MPI_Type_struct(3,blocklens,indices,old_types,&mpistruct);

  MPI_Type_commit(&mpistruct);
}
Author: akihiko-fujii, Project: mpi1, Lines: 26, Source: b.c
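A hedged usage sketch for the type committed above: the struct layout below is inferred from the blocklens and MPI_Address calls (one char, three ints, five doubles), and mpistruct is assumed to be the file-scope handle the function fills in; the exchange helper is a hypothetical addition.

#include <mpi.h>

// Layout inferred from the blocklens/MPI_Address calls above.
struct mystruct {
    char   ch;
    int    a[3];
    double x[5];
};

MPI_Datatype mpistruct;                    // filled in by mytype_commit()
void mytype_commit(struct mystruct value); // from the example above

void exchange(int rank) {
    struct mystruct value = {0};
    mytype_commit(value);   // value is only used for its field offsets
    if (rank == 0) {
        value.ch = 'p'; value.a[0] = 42; value.x[0] = 3.14;
        MPI_Send(&value, 1, mpistruct, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Recv(&value, 1, mpistruct, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
}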


Example 4: Build_type

void Build_type( float* a, float* b, int* n, MPI_Datatype* point_t ) {

    int block_lengths[3];
    MPI_Aint displacements[3];
    MPI_Datatype typelist[3];
    MPI_Aint start_address;
    MPI_Aint address;

    block_lengths[0] = block_lengths[1] = block_lengths[2] = 1;
    typelist[0] = MPI_FLOAT;
    typelist[1] = MPI_FLOAT;
    typelist[2] = MPI_INT;

    displacements[0] = 0;
    MPI_Address(a, &start_address);
    MPI_Address(b, &address);
    displacements[1] = address - start_address;
    
    MPI_Address(n, &address);
    displacements[2] = address - start_address;

    MPI_Type_struct(3, block_lengths, displacements, typelist, point_t);
    MPI_Type_commit(point_t);

}
Author: ajdecon, Project: play, Lines: 25, Source: derived.c


Example 5: InitializeMPIStuff

static void 
InitializeMPIStuff(void)
{
    const int n = 5;
    int          lengths[n]       = {1, 1, 1, 1, 1};
    MPI_Aint     displacements[n] = {0, 0, 0, 0, 0};
    MPI_Datatype types[n] = {MPI_FLOAT,
                             MPI_UNSIGNED_CHAR,
                             MPI_UNSIGNED_CHAR,
                             MPI_UNSIGNED_CHAR,
                             MPI_UNSIGNED_CHAR};

    // create the MPI data type for Pixel
    Pixel onePixel;
    MPI_Address(&onePixel.z, &displacements[0]);
    MPI_Address(&onePixel.r, &displacements[1]);
    MPI_Address(&onePixel.g, &displacements[2]);
    MPI_Address(&onePixel.b, &displacements[3]);
    MPI_Address(&onePixel.a, &displacements[4]);
    for (int i = n-1; i >= 0; i--)
        displacements[i] -= displacements[0];
    MPI_Type_struct(n, lengths, displacements, types,
                    &mpiTypePixel);
    MPI_Type_commit(&mpiTypePixel);

    // and the merge operation for a reduction
    MPI_Op_create((MPI_User_function *)MergePixelBuffersOp, 1,
                  &mpiOpMergePixelBuffers);
}
Author: mclarsen, Project: EAVL, Lines: 29, Source: eavlCompositor.cpp
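MPI_Op_create wraps a user function with the fixed signature void fn(void* invec, void* inoutvec, int* len, MPI_Datatype*). The MergePixelBuffersOp referenced above is not shown in this example; the sketch below is a plausible z-buffer merge over the Pixel type, with the field layout assumed from the MPI_Address calls, not the actual EAVL implementation.

#include <mpi.h>

// Field layout assumed from the MPI_Address calls above.
struct Pixel {
    float z;
    unsigned char r, g, b, a;
};

// User reductions registered with MPI_Op_create must have this exact
// signature. Here: keep whichever pixel is nearer (smaller z assumed nearer).
void MergePixelBuffersOp(void* invec, void* inoutvec, int* len, MPI_Datatype*)
{
    const Pixel* in    = static_cast<const Pixel*>(invec);
    Pixel*       inout = static_cast<Pixel*>(inoutvec);
    for (int i = 0; i < *len; ++i)
        if (in[i].z < inout[i].z)
            inout[i] = in[i];
}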


Example 6: MPI_Address

 void peano::applications::latticeboltzmann::blocklatticeboltzmann::forcerecords::BlockPositionPacked::initDatatype() {
    const int Attributes = 2;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //_blockPosition
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       DIMENSIONS,		 //_blockPosition
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    BlockPositionPacked dummyBlockPositionPacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockPositionPacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockPositionPacked[0]._persistentRecords._blockPosition[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyBlockPositionPacked[1]._persistentRecords._blockPosition[0])), 		&disp[1] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &BlockPositionPacked::Datatype );
    MPI_Type_commit( &BlockPositionPacked::Datatype );
    
 }
Author: p-hoffmann, Project: madpac, Lines: 30, Source: BlockPosition.cpp


Example 7: MPI_Address

 void peano::applications::navierstokes::prototype1::repositories::PrototypeRepositoryStatePacked::initDatatype() {
    const int Attributes = 3;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //action
       MPI_CHAR,		 //reduceState
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //action
       1,		 //reduceState
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    PrototypeRepositoryStatePacked dummyPrototypeRepositoryStatePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]._persistentRecords._action))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]._persistentRecords._reduceState))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[1]._persistentRecords._action))), 		&disp[2] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &PrototypeRepositoryStatePacked::Datatype );
    MPI_Type_commit( &PrototypeRepositoryStatePacked::Datatype );
    
 }
Author: p-hoffmann, Project: madpac, Lines: 33, Source: PrototypeRepositoryState.cpp


Example 8: Build_derived_type

void Build_derived_type(border* indata, MPI_Datatype* message_type_ptr){
  int block_lengths[3];

  MPI_Aint displacements[3];
  MPI_Aint addresses[4];
  MPI_Datatype typelist[3];

  /* Builds a derived datatype containing three ints */

  /* First, specify the element types */

  typelist[0]=MPI_INT;
  typelist[1]=MPI_INT; 
  typelist[2]=MPI_INT;

 

  /* Specify the number of elements of each type */
  block_lengths[0]=block_lengths[1]=block_lengths[2] = 1;
  
  /* Compute the displacements of the elements relative to indata */
  MPI_Address(indata, &addresses[0]);
  MPI_Address(&(indata->left), &addresses[1]);
  MPI_Address(&(indata->right), &addresses[2]);
  MPI_Address(&(indata->length), &addresses[3]);

  displacements[0]=addresses[1]-addresses[0];
  displacements[1]=addresses[2]-addresses[0];
  displacements[2]=addresses[3]-addresses[0];
  
  /* Create the derived type */
  MPI_Type_struct(3, block_lengths, displacements,typelist, message_type_ptr);
  /* Commit it for use */
  MPI_Type_commit(message_type_ptr);
} /* Build_derived_type */
Author: stardreamer, Project: ParallelLab, Lines: 35, Source: build_derived_type.c


Example 9: MPI_Address

 void peano::integration::partitioncoupling::builtin::records::ForceTorquePacked::initDatatype() {
    const int Attributes = 3;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //_translationalForce
       MPI_DOUBLE,		 //_torque
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       3,		 //_translationalForce
       3,		 //_torque
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    ForceTorquePacked dummyForceTorquePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]._persistentRecords._translationalForce[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]._persistentRecords._torque[0]))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyForceTorquePacked[1]._persistentRecords._translationalForce[0])), 		&disp[2] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &ForceTorquePacked::Datatype );
    MPI_Type_commit( &ForceTorquePacked::Datatype );
    
 }
Author: p-hoffmann, Project: madpac, Lines: 33, Source: ForceTorque.cpp


Example 10: MPI_Address

 void tarch::parallel::messages::RegisterAtNodePoolMessagePacked::initDatatype() {
    const int Attributes = 2;
    MPI_Datatype subtypes[Attributes] = {
       MPI_SHORT,		 //nodeName
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       MPI_MAX_NAME_STRING_ADDED_ONE,		 //nodeName
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegisterAtNodePoolMessagePacked dummyRegisterAtNodePoolMessagePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegisterAtNodePoolMessagePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegisterAtNodePoolMessagePacked[0]._persistentRecords._nodeName[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyRegisterAtNodePoolMessagePacked[1]._persistentRecords._nodeName[0])), 		&disp[1] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegisterAtNodePoolMessagePacked::Datatype );
    MPI_Type_commit( &RegisterAtNodePoolMessagePacked::Datatype );
    
 }
Author: p-hoffmann, Project: madpac, Lines: 30, Source: RegisterAtNodePoolMessage.cpp


Example 11: initialiseType

/**
 * Initialises the command-package MPI type; we use this to illustrate how additional
 * information (in this case the parent rank) can be associated with commands
 */
static void initialiseType() {
    struct PP_Control_Package package;
    MPI_Aint pckAddress, dataAddress;
    MPI_Address(&package, &pckAddress);
    MPI_Address(&package.data, &dataAddress);
    int blocklengths[2] = {1, 1}, nitems = 2;
    MPI_Datatype types[2] = {MPI_CHAR, MPI_INT};
    MPI_Aint offsets[2] = {0, dataAddress - pckAddress};
    MPI_Type_create_struct(nitems, blocklengths, offsets, types, &PP_COMMAND_TYPE);
    MPI_Type_commit(&PP_COMMAND_TYPE);
}
Author: konstantinos-mouza, Project: pdp-frogs, Lines: 15, Source: pool.c
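A hedged usage sketch for the type built above: the char field's name is assumed (only the int field `data` appears in the MPI_Address calls), and the sendCommand helper is a hypothetical addition, not part of the pdp-frogs source.

#include <mpi.h>

// Assumed layout: a command character at offset 0 (the MPI_CHAR entry)
// followed by the int 'data' field addressed above.
struct PP_Control_Package {
    char command;
    int  data;    // carries the parent rank, per the comment above
};

extern MPI_Datatype PP_COMMAND_TYPE;   // committed by initialiseType()

// Hypothetical helper: send a command carrying the parent's rank.
void sendCommand(char command, int parentRank, int dest) {
    struct PP_Control_Package package;
    package.command = command;
    package.data    = parentRank;
    MPI_Send(&package, 1, PP_COMMAND_TYPE, dest, 0, MPI_COMM_WORLD);
}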


Example 12: MPI_Address

 void peano::applications::puregrid::records::RegularGridStatePacked::initDatatype() {
    const int Attributes = 9;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //maxRefinementsPerIteration
       MPI_DOUBLE,		 //meshWidth
       MPI_DOUBLE,		 //numberOfInnerVertices
       MPI_DOUBLE,		 //numberOfBoundaryVertices
       MPI_DOUBLE,		 //numberOfOuterVertices
       MPI_DOUBLE,		 //numberOfInnerCells
       MPI_DOUBLE,		 //numberOfOuterCells
       MPI_SHORT,		 //_packedRecords0
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //maxRefinementsPerIteration
       DIMENSIONS,		 //meshWidth
       1,		 //numberOfInnerVertices
       1,		 //numberOfBoundaryVertices
       1,		 //numberOfOuterVertices
       1,		 //numberOfInnerCells
       1,		 //numberOfOuterCells
       1,		 //_packedRecords0
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegularGridStatePacked dummyRegularGridStatePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._maxRefinementsPerIteration))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._meshWidth[0]))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfInnerVertices))), 		&disp[2] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfBoundaryVertices))), 		&disp[3] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfOuterVertices))), 		&disp[4] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfInnerCells))), 		&disp[5] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfOuterCells))), 		&disp[6] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._packedRecords0))), 		&disp[7] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[1]._persistentRecords._maxRefinementsPerIteration))), 		&disp[8] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridStatePacked::Datatype );
    MPI_Type_commit( &RegularGridStatePacked::Datatype );
    
 }
Author: p-hoffmann, Project: madpac, Lines: 51, Source: RegularGridState.cpp


Example 13: MPI_Address

 void peano::applications::poisson::multigrid::records::RegularGridState::initDatatype() {
    const int Attributes = 9;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //omega
       MPI_DOUBLE,		 //meshWidth
       MPI_DOUBLE,		 //numberOfInnerVertices
       MPI_DOUBLE,		 //numberOfBoundaryVertices
       MPI_DOUBLE,		 //numberOfOuterVertices
       MPI_DOUBLE,		 //numberOfInnerCells
       MPI_DOUBLE,		 //numberOfOuterCells
       MPI_CHAR,		 //gridIsStationary
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //omega
       DIMENSIONS,		 //meshWidth
       1,		 //numberOfInnerVertices
       1,		 //numberOfBoundaryVertices
       1,		 //numberOfOuterVertices
       1,		 //numberOfInnerCells
       1,		 //numberOfOuterCells
       1,		 //gridIsStationary
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegularGridState dummyRegularGridState[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._omega))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._meshWidth[0]))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfInnerVertices))), 		&disp[2] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfBoundaryVertices))), 		&disp[3] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfOuterVertices))), 		&disp[4] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfInnerCells))), 		&disp[5] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfOuterCells))), 		&disp[6] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._gridIsStationary))), 		&disp[7] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[1]._persistentRecords._omega))), 		&disp[8] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridState::Datatype );
    MPI_Type_commit( &RegularGridState::Datatype );
    
 }
Author: p-hoffmann, Project: madpac, Lines: 51, Source: RegularGridState.cpp


Example 14: make_maskbase_struct

void make_maskbase_struct(void)
{
    int blockcounts[2] = { 6, 5 };
    MPI_Datatype types[2] = { MPI_DOUBLE, MPI_INT };
    MPI_Aint displs[2];
    maskbase mbase;

    MPI_Address(&mbase.timesigma, &displs[0]);
    MPI_Address(&mbase.numchan, &displs[1]);
    displs[1] -= displs[0];
    displs[0] = 0;
    MPI_Type_struct(2, blockcounts, displs, types, &maskbase_type);
    MPI_Type_commit(&maskbase_type);
}
Author: MilesCranmer, Project: presto, Lines: 14, Source: mpiprepsubband_utils.c


Example 15: append_to_message_real

 //!
 //! \brief Appends this object's field addresses and element counts to an MPI message description.
 //!
 void append_to_message_real(
   std::vector< MPI_Aint >& displ
   , std::vector< int >& count
   )
 {
   MPI_Aint addr;
   // Append cell composition
   MPI_Address(&phi[0],&addr);
   displ.push_back(addr);
   count.push_back(phi.size());
   // Append other properties
   MPI_Address(&scalars,&addr);
   displ.push_back(addr);
   count.push_back(scalars.size());
 }
Author: phpisciuneri, Project: tg, Lines: 18, Source: scalar_averages.hpp


Example 16: MPI_Address

 void peano::kernel::regulargrid::tests::records::TestCell::initDatatype() {
    const int Attributes = 1;
    MPI_Datatype subtypes[Attributes] = {
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    TestCell dummyTestCell[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyTestCell[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyTestCell[1]))), &disp[0] );  // MPI_UB entry; was missing, leaving disp[0] uninitialised
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &TestCell::Datatype );
    MPI_Type_commit( &TestCell::Datatype );
    
 }
Author: p-hoffmann, Project: madpac, Lines: 25, Source: TestCell.cpp


Example 17: MPI_Address

 void peano::applications::poisson::jacobitutorial::records::RegularGridCell::initDatatype() {
    const int Attributes = 1;
    MPI_Datatype subtypes[Attributes] = {
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegularGridCell dummyRegularGridCell[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridCell[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridCell[1]))), &disp[0] );  // MPI_UB entry; was missing, leaving disp[0] uninitialised
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridCell::Datatype );
    MPI_Type_commit( &RegularGridCell::Datatype );
    
 }
Author: p-hoffmann, Project: madpac, Lines: 25, Source: RegularGridCell.cpp


Example 18: VTUnify_MPI_Address

VT_MPI_INT VTUnify_MPI_Address( void * location, VTUnify_MPI_Aint * address )
{
   VT_MPI_INT error;

   error = CALL_MPI( MPI_Address( location, (MPI_Aint*)address ) );

   return (error == MPI_SUCCESS) ? 1 : 0;
}
Author: cstatz, Project: vt-dl-support, Lines: 8, Source: vt_unify_mpi.c


Example 19: MPI_Address

 void peano::applications::navierstokes::prototype2::records::RegularGridFluidStateEnhancedDivFreeEulerExplicit::initDatatype() {
    const int Attributes = 8;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //meshWidth
       MPI_DOUBLE,		 //numberOfInnerVertices
       MPI_DOUBLE,		 //numberOfBoundaryVertices
       MPI_DOUBLE,		 //numberOfOuterVertices
       MPI_DOUBLE,		 //numberOfInnerCells
       MPI_DOUBLE,		 //numberOfOuterCells
       MPI_CHAR,		 //gridIsStationary
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       DIMENSIONS,		 //meshWidth
       1,		 //numberOfInnerVertices
       1,		 //numberOfBoundaryVertices
       1,		 //numberOfOuterVertices
       1,		 //numberOfInnerCells
       1,		 //numberOfOuterCells
       1,		 //gridIsStationary
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegularGridFluidStateEnhancedDivFreeEulerExplicit dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._meshWidth[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._numberOfInnerVertices))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._numberOfBoundaryVertices))), 		&disp[2] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._numberOfOuterVertices))), 		&disp[3] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._numberOfInnerCells))), 		&disp[4] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._numberOfOuterCells))), 		&disp[5] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._gridIsStationary))), 		&disp[6] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[1]._persistentRecords._meshWidth.data())), 		&disp[7] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridFluidStateEnhancedDivFreeEulerExplicit::Datatype );
    MPI_Type_commit( &RegularGridFluidStateEnhancedDivFreeEulerExplicit::Datatype );
    
 }
Author: p-hoffmann, Project: madpac, Lines: 48, Source: RegularGridFluidStateEnhancedDivFreeEulerExplicit.cpp


Example 20: MPI_Address

void 
avtWholeImageCompositerWithZ::InitializeMPIStuff(void)
{

#define UCH MPI_UNSIGNED_CHAR
#define FLT MPI_FLOAT
   int                lengths[] = {  1,   1,   1,   1};
   MPI_Aint     displacements[] = {  0,   0,   0,   0};
   MPI_Datatype         types[] = {FLT, UCH, UCH, UCH};
   ZFPixel_t    onePixel;
#undef UCH
#undef FLT

   // create the MPI data type for ZFPixel
   MPI_Address(&onePixel.z, &displacements[0]);
   MPI_Address(&onePixel.r, &displacements[1]);
   MPI_Address(&onePixel.g, &displacements[2]);
   MPI_Address(&onePixel.b, &displacements[3]);

   for (int i = 3; i >= 0; --i)
      displacements[i] -= displacements[0];

   MPI_Type_create_struct(4, lengths, displacements, types,
      &avtWholeImageCompositerWithZ::mpiTypeZFPixel);

   // check that the datatype has the correct extent
   MPI_Aint ext;
   MPI_Type_extent(avtWholeImageCompositerWithZ::mpiTypeZFPixel, &ext);
   if (ext != sizeof(onePixel))
   {
       MPI_Datatype tmp = avtWholeImageCompositerWithZ::mpiTypeZFPixel;
       MPI_Type_create_resized(tmp, 0, sizeof(ZFPixel_t),
           &avtWholeImageCompositerWithZ::mpiTypeZFPixel);
       MPI_Type_free(&tmp);
   }

   MPI_Type_commit(&avtWholeImageCompositerWithZ::mpiTypeZFPixel);

   MPI_Op_create((MPI_User_function *)MergeZFPixelBuffers, 1,
      &avtWholeImageCompositerWithZ::mpiOpMergeZFPixelBuffers);
}
Author: cchriste, Project: visit, Lines: 41, Source: avtWholeImageCompositerWithZ.C
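Unlike the MPI_UB idiom used in the other examples, this last one measures the assembled type's extent with MPI_Type_extent and, if it disagrees with sizeof(ZFPixel_t), rebuilds the type with MPI_Type_create_resized to force the correct extent before committing. This is the approach the MPI-3 standard expects, since MPI_UB was removed in MPI-3.0.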



Note: The MPI_Address examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.

