IPCC  1.0
CMPIManager Class Reference

MPI Management class. More...

#include "MPIManager.h"

Collaboration diagram for CMPIManager:

Classes

struct  COMPLEX_NUMBER
 Complex number. More...
 

Public Member Functions

 CMPIManager ()
 Constructor. More...
 
 ~CMPIManager ()
 Destructor. More...
 

Static Public Member Functions

static bool InitLevel (int nMPILevel, int nFindingDegeneratedEVCount)
 Initialize the MPI levels; the lowest level is used for multi-node Lanczos calculation. More...
 
static bool CheckDeflationNodeCount (int nNeedNodeCount)
 Check whether the total node count fits the deflation group count. More...
 
static void SetMPIEnviroment (int nRank, int nTotalNode)
 Set the MPI environment. More...
 
static void LoadBlancing (int nElementCount)
 Load balancing for MPI; used for Lanczos solving with geometric construction. More...
 
static int GetCurrentLoadBalanceCount (int nLBIndex)
 Get the current rank's load-balancing count. More...
 
static int GetLoadBalanceCount (int nRank, int nLBIndex)
 
static int GetCurrentRank ()
 
static int GetCurrentRank (MPI_Comm comm)
 Get Current node's rank number. More...
 
static int GetTotalNodeCount ()
 Get the total node count. More...
 
static bool IsRootRank ()
 Check whether this node is the root rank. More...
 
static bool IsRootRank (MPI_Comm comm)
 Check this node is root rank in 'comm' MPI_Comm. More...
 
static bool IsInMPIRoutine ()
 Check whether this process is running in an MPI environment. More...
 
static void BroadcastVector (CMatrixOperation::CVector *pVector)
 Broadcast a vector to sub ranks. More...
 
static void BroadcastBool (bool *boolValue, int nRootRank=0)
 Broadcast a boolean value. More...
 
static void BroadcastDouble (double *pValue, unsigned int nSize, int nRootRank=0, MPI_Comm comm=MPI_COMM_NULL)
 Broadcast double values. More...
 
static void BroadcastInt (int *pValue, unsigned int nSize, int nRootRank=0, MPI_Comm comm=MPI_COMM_NULL)
 Broadcast integer values. More...
 
static void BroadcastLanczosResult (CLanczosMethod::LPEIGENVALUE_RESULT lpResult, int nIterationCount)
 Broadcast Lanczos result. More...
 
static void MergeVector (CMatrixOperation::CVector *pVector, CMatrixOperation::CVector *pResultVector, unsigned int nMergeSize, int nLBIndex)
 Merge vector to sub rank. More...
 
static void MergeVector (CMatrixOperation::CVector *pVector, unsigned int nMergeSize, int nLBIndex)
 Merge vector to sub rank. More...
 
static void MergeVectorOptimal (CMatrixOperation::CVector *pSrcVector, CMatrixOperation::CVector *pResultVector, unsigned int nMergeSize, double fFirstIndex, int nLBIndex)
 Merge vector to sub rank, operated without vector class member function call. More...
 
static void MergeVectorEx_Optimal (CMatrixOperation::CVector *pVector, CMatrixOperation::CVector *pResultVector, unsigned int nMergeSize, double fFirstIndex, unsigned int nSizeFromPrevRank, unsigned int nSizeFromNextRank, unsigned int nSizetoPrevRank, unsigned int nSizetoNextRank, unsigned int *mPos, int nLBIndex)
 Merge vector for 1 layer exchanging. More...
 
static void AllReduceComlex (CComplex *pNumber, CTimeMeasurement::MEASUREMENT_INDEX INDEX=CTimeMeasurement::COMM)
 Do an all-reduce (sum) on a CComplex value. More...
 
static double AllReduceDouble (double fNumber)
 Do an all-reduce (sum) on a double value. More...
 
static int GetRootRank ()
 Get the root rank. More...
 
static void FinalizeManager ()
 Finalize the manager and release all memory. More...
 
static int InitCommunicationBufferMetric ()
 Initializing MPI Communication buffer for MVMul. More...
 
static void SendDoubleBufferSync (int nTargetRank, double *pBuffer, int nSize, MPI_Request *req, MPI_Comm commWorld=MPI_COMM_NULL)
 Send a double data array synchronously. More...
 
static void WaitSendDoubleBufferSync (MPI_Request *req)
 Wait for the synchronous double-buffer send to complete. More...
 
static void ReceiveDoubleBufferSync (int nSourceRank, double *pBuffer, int nSize, MPI_Request *req, MPI_Comm commWorld=MPI_COMM_NULL)
 Receive a double data array synchronously. More...
 
static void WaitReceiveDoubleBufferAsync (MPI_Request *req)
 Wait for the double-buffer receive to complete. More...
 
static MPI_Comm GetMPIComm ()
 Get the MPI_Comm. More...
 
static bool IsMultiLevelMPI ()
 Check whether multilevel MPI is configured. More...
 
static void BarrierAllComm ()
 Barrier on MPI_COMM_WORLD. More...
 
static void Barrier ()
 Barrier on the Lanczos compute communicator. More...
 
static bool IsLanczosComputeRoot ()
 Check whether this rank is the root of the Lanczos compute group. More...
 
static bool IsDeflationRoot ()
 Check whether this rank is the root of the deflation group. More...
 
static int * GetEigenvalueCountFromDeflationGroup (int nDeflationGroupCount, int nLocalEVCount)
 Collect the total eigenvalue counts from all deflation groups. More...
 
static void GatherVDouble (int nSourceCount, double *pReceiveBuffer, int *pSourceCount, double *pSendBuffer, int nSendCount, MPI_Comm comm=MPI_COMM_NULL)
 MPI_Gatherv wrapper for double data. More...
 
static void GatherVInt (int nSourceCount, int *pReceiveBuffer, int *pSourceCount, int *pSendBuffer, int nSendCount, MPI_Comm comm=MPI_COMM_NULL)
 MPI_Gatherv wrapper for int data. More...
 
static void GatherEVFromDeflationGroup (int nSourceCount, double *pReceiveBuffer, int *pSourceCount, double *pSendBuffer, int nSendCount)
 Gather eigenvalues from the deflation group. More...
 
static void GatherEVIterationFromDeflationGroup (int nSourceCount, int *pReceiveBuffer, int *pSourceCount, int *pSendBuffer, int nSendCount)
 Gather eigenvalue-finding iteration counts from the deflation group. More...
 
static void ExchangeCommand (double *pfCommand, MPI_Comm comm)
 Exchange a command between MPI_Comm groups. More...
 
static MPI_Comm GetLanczosComputComm ()
 Get the Lanczos computing group MPI_Comm. More...
 
static MPI_Comm GetDeflationComm ()
 Get the deflation computing group MPI_Comm. More...
 
static void SendVectorSync (int nTargetRank, CMatrixOperation::CVector *pVector, int nSize, MPI_Request *req, MPI_Comm commWorld=MPI_COMM_NULL)
 Send a vector synchronously. More...
 
static void ReceiveVectorSync (int nSourceRank, CMatrixOperation::CVector *pVector, int nSize, MPI_Request *req, MPI_Comm commWorld=MPI_COMM_NULL)
 Receive a vector synchronously. More...
 
static unsigned int GetLanczosGroupIndex ()
 Get the Lanczos group index. More...
 
static bool InitMPIEnv (bool &bMPI, CCommandFileParser::LPINPUT_CMD_PARAM lpParam)
 Initialize the MPI environment. More...
 
static void SetPhiTid (int *tid)
 

Private Types

typedef struct CMPIManager::COMPLEX_NUMBER LPCOMPLEX_NUMBER
 

Static Private Attributes

static int m_nCurrentRank = 0
 MPI Rank. More...
 
static int m_nCommWorldRank = 0
 MPI Rank before split. More...
 
static int m_nTotalNode = 1
 Total node count. More...
 
static bool m_bStartMPI = false
 MPI_Init call or not. More...
 
static std::vector< int * > m_vectLoadBalance
 Load balancing for MPI communication. More...
 
static LPCOMPLEX_NUMBER m_pCommBuffer = NULL
 Data buffer for MPI Communication. More...
 
static LPCOMPLEX_NUMBER m_pConvertingBuffer = NULL
 Data buffer for Vector converting. More...
 
static std::vector< int * > m_vectRecvCount
 Receiving count variable for MPI communication. More...
 
static std::vector< int * > m_vectSendCount
 Sending count variable for MPI communication. More...
 
static int * m_pBankInfo = NULL
 Bank information after the MPI split. More...
 
static std::vector< int * > m_vectDispls
 Displacements for MPI communication. More...
 
static MPI_Request m_SendDoubleAsyncRequest = MPI_REQUEST_NULL
 Request for sending double. More...
 
static MPI_Request m_ReceiveDoubleAsyncRequest = MPI_REQUEST_NULL
 Request for receiving double. More...
 
static unsigned int m_nMPILevel = 1
 MPI Level. More...
 
static bool m_bNeedPostOperation [10] = { false, false, false, false, false, false, false, false, false, false }
 Flags for required post operations. More...
 
static MPI_Comm m_mpiCommIndex = MPI_COMM_WORLD
 Lanczos Method MPI_Comm. More...
 
static MPI_Comm m_deflationComm = MPI_COMM_NULL
 Deflation computing MPI_Comm. More...
 
static MPI_Group m_lanczosGroup = MPI_GROUP_EMPTY
 MPI Group for Lanczos computation. More...
 
static MPI_Group m_deflationGroup = MPI_GROUP_EMPTY
 MPI Group for Deflation computation. More...
 
static unsigned int m_nLanczosGroupIndex = 0
 MPI Group index for Lanczos group. More...
 
static bool m_bMultiLevel = false
 Flag for Multilevel MPI group. More...
 
static int m_nLBCount = 0
 

Detailed Description

MPI Management class.

Date
2014/8/25

Definition at line 19 of file MPIManager.h.

Member Typedef Documentation

Constructor & Destructor Documentation

CMPIManager::CMPIManager ( )

Constructor.

Definition at line 39 of file MPIManager.cpp.

40 {
41 
42 }
CMPIManager::~CMPIManager ( )

Destructor.

Definition at line 44 of file MPIManager.cpp.

45 {
46 }

Member Function Documentation

void CMPIManager::AllReduceComlex ( CComplex *  pNumber,
CTimeMeasurement::MEASUREMENT_INDEX  INDEX = CTimeMeasurement::COMM 
)
static

Do an all-reduce (sum) on a CComplex value.

Parameters
pNumber: Variable to be summed
INDEX: Time measurement index

Definition at line 571 of file MPIManager.cpp.

References CComplex::GetImaginaryNumber(), CComplex::GetRealNumber(), m_mpiCommIndex, CTimeMeasurement::MeasurementEnd(), CTimeMeasurement::MeasurementStart(), and CComplex::SetComplexNumber().

Referenced by CMatrixOperation::VVDot().

572 {
573 #ifdef DISABLE_MPI_ROUTINE
574  return;
575 #endif
576 
577  double fSend[2], fRecv[2];
578 
579  fSend[0] = pNumber->GetRealNumber();
580  fSend[1] = pNumber->GetImaginaryNumber();
581 
583  MPI_Allreduce(fSend, fRecv, 2, MPI_DOUBLE, MPI_SUM, m_mpiCommIndex);
585 
586  pNumber->SetComplexNumber(fRecv[0], fRecv[1]);
587 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
void SetComplexNumber(double fReal, double fImaginaray)
Set Complex number using real part and imaginary part.
Definition: Complex.cpp:58
double GetImaginaryNumber() const
Get imaginary part.
Definition: Complex.h:25
double GetRealNumber() const
Get real part.
Definition: Complex.h:24

Here is the call graph for this function:

Here is the caller graph for this function:
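The following is a minimal usage sketch, not taken from the source: it assumes only the CComplex accessors referenced above and an already initialized MPI environment, and shows each rank reducing its partial complex result.

#include <cstdio>
#include "MPIManager.h"
#include "Complex.h"

// Hypothetical helper: sum per-rank partial results of a complex dot product.
void ReducePartialDot(CComplex &partial)
{
    // After the call, 'partial' holds the MPI_SUM over all ranks in the
    // Lanczos communicator (see the MPI_Allreduce call above).
    CMPIManager::AllReduceComlex(&partial);

    if (CMPIManager::IsRootRank())
        printf("dot = %g + %gi\n", partial.GetRealNumber(), partial.GetImaginaryNumber());
}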

double CMPIManager::AllReduceDouble ( double  fNumber)
static

Do an all-reduce (sum) on a double value.

Parameters
fNumber: Variable to be summed
Returns
Reduction result

Definition at line 593 of file MPIManager.cpp.

References CTimeMeasurement::COMM, m_mpiCommIndex, CTimeMeasurement::MeasurementEnd(), and CTimeMeasurement::MeasurementStart().

Referenced by CGeometricShape::CalculateUnitcellCount(), CSPLoop::ConstructionGeometric(), CMatrixOperation::CVector::GetNorm(), and CTBMS_Solver::Launching_TBMS_Solver().

594 {
595 #ifdef DISABLE_MPI_ROUTINE
596  return fNumber;
597 #endif
598 
599  double fRecv;
600 
602  MPI_Allreduce(&fNumber, &fRecv, 1, MPI_DOUBLE, MPI_SUM, m_mpiCommIndex);
604 
605  return fRecv;
606 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.

Here is the call graph for this function:

Here is the caller graph for this function:

static void CMPIManager::Barrier ( )
inlinestatic

Definition at line 67 of file MPIManager.h.

References m_mpiCommIndex.

Referenced by CSPLoop::DumpSolution(), and CLanczosMethod::SaveLanczosResult().

67 { MPI_Barrier(m_mpiCommIndex); };
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100

Here is the caller graph for this function:

void CMPIManager::BarrierAllComm ( )
static

Barrier on MPI_COMM_WORLD.

Caution: this function waits on all ranks, even if the communicator has been split into several colors.

Definition at line 733 of file MPIManager.cpp.

Referenced by CLanczosMethod::MergeDegeneratedEigenvalues().

734 {
736  MPI_Barrier(MPI_COMM_WORLD);
737 }

Here is the caller graph for this function:

void CMPIManager::BroadcastBool ( bool *  boolValue,
int  nRootRank = 0 
)
static

Broadcast a boolean value.

Parameters
boolValue: bool variable to broadcast
nRootRank: Root rank index

Definition at line 470 of file MPIManager.cpp.

References CTimeMeasurement::COMM, m_mpiCommIndex, CTimeMeasurement::MeasurementEnd(), and CTimeMeasurement::MeasurementStart().

Referenced by CLanczosMethod::LanczosIterationLoop().

471 {
472 #ifdef DISABLE_MPI_ROUTINE
473  return;
474 #endif
475 
477  MPI_Bcast(boolValue, 1, MPI_C_BOOL, nRootRank, m_mpiCommIndex);
479 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.

Here is the call graph for this function:

Here is the caller graph for this function:
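A minimal sketch of the typical use in an iteration loop (the loop itself is hypothetical; the caller list above points at CLanczosMethod::LanczosIterationLoop): the root rank decides convergence and every rank receives the same flag.

#include "MPIManager.h"

// Hypothetical iteration loop: rank 0 evaluates the convergence criterion,
// all ranks receive the identical flag before deciding whether to continue.
void IterateUntilConverged()
{
    bool bConverged = false;
    while (!bConverged)
    {
        // ... local work; the root rank sets bConverged when done ...
        CMPIManager::BroadcastBool(&bConverged);   // root rank 0 by default
    }
}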

void CMPIManager::BroadcastDouble ( double *  pValue,
unsigned int  nSize,
int  nRootRank = 0,
MPI_Comm  comm = MPI_COMM_NULL 
)
static

Broadcast double values.

Parameters
pValue: double data buffer to broadcast
nSize: Data buffer size
nRootRank: Root rank index
comm: MPI_Comm to broadcast over (the Lanczos communicator is used when MPI_COMM_NULL)

Definition at line 486 of file MPIManager.cpp.

References CTimeMeasurement::COMM, m_mpiCommIndex, CTimeMeasurement::MeasurementEnd(), and CTimeMeasurement::MeasurementStart().

Referenced by CLanczosMethod::DoResidualCheck(), CGeometricShape::ExchangeAtomInfoBetweenNode(), ExchangeCommand(), CLanczosMethod::MergeDegeneratedEigenvalues(), and CSPLoop::SolveSchroedinger().

487 {
488 #ifdef DISABLE_MPI_ROUTINE
489  return;
490 #endif
491 
493  if( MPI_COMM_NULL == comm )
494  MPI_Bcast(pValue, nSize, MPI_DOUBLE, nRootRank, m_mpiCommIndex);
495  else
496  MPI_Bcast(pValue, nSize, MPI_DOUBLE, nRootRank, comm);
498 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.

Here is the call graph for this function:

Here is the caller graph for this function:
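A hedged usage sketch (the potential array and its producer are assumed, not from the source): rank 0 fills a double buffer and every rank receives a copy.

#include <vector>
#include "MPIManager.h"

// Hypothetical example: share a root-computed array with all ranks.
void SharePotential(std::vector<double> &potential)
{
    if (CMPIManager::IsRootRank())
    {
        // ... fill 'potential' on the root rank only ...
    }
    // Broadcasts over the Lanczos communicator by default; pass an explicit
    // MPI_Comm as the fourth argument to broadcast over another group.
    CMPIManager::BroadcastDouble(potential.data(), (unsigned int)potential.size());
}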

void CMPIManager::BroadcastInt ( int *  pValue,
unsigned int  nSize,
int  nRootRank = 0,
MPI_Comm  comm = MPI_COMM_NULL 
)
static

Broadcast integer values.

Parameters
[in,out] pValue: Variable to broadcast
nSize: Number of elements to broadcast
nRootRank: Root rank number
comm: MPI_Comm over which to broadcast

Definition at line 506 of file MPIManager.cpp.

References CTimeMeasurement::COMM, m_mpiCommIndex, CTimeMeasurement::MeasurementEnd(), and CTimeMeasurement::MeasurementStart().

Referenced by CLanczosMethod::MergeDegeneratedEigenvalues().

507 {
508 #ifdef DISABLE_MPI_ROUTINE
509  return;
510 #endif
511 
513  if( MPI_COMM_NULL == comm )
514  MPI_Bcast(pValue, nSize, MPI_INT, nRootRank, m_mpiCommIndex);
515  else
516  MPI_Bcast(pValue, nSize, MPI_INT, nRootRank, comm);
518 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.

Here is the call graph for this function:

Here is the caller graph for this function:

void CMPIManager::BroadcastLanczosResult ( CLanczosMethod::LPEIGENVALUE_RESULT  lpResult,
int  nIterationCount 
)
static

Broadcast Lanczos result.

Parameters
lpResult: Lanczos method result to broadcast
nIterationCount: Current iteration count

Definition at line 524 of file MPIManager.cpp.

References CTimeMeasurement::COMM, GetRootRank(), IsRootRank(), m_mpiCommIndex, CTimeMeasurement::MALLOC, CTimeMeasurement::MeasurementEnd(), CTimeMeasurement::MeasurementStart(), CLanczosMethod::EIGENVALUE_RESULT::nEigenValueCount, CLanczosMethod::EIGENVALUE_RESULT::nEigenValueCountForMemeory, CLanczosMethod::EIGENVALUE_RESULT::nEigenVectorSize, CLanczosMethod::EIGENVALUE_RESULT::nMaxEigenValueFoundIteration, CLanczosMethod::EIGENVALUE_RESULT::pEigenValueFoundIteration, and CLanczosMethod::EIGENVALUE_RESULT::pEigenVectors.

Referenced by CLanczosMethod::LanczosIteration().

525 {
526  unsigned int nCastData[4];
527  unsigned int i;
528 
529 #ifdef DISABLE_MPI_ROUTINE
530  return;
531 #endif
532 
533  if( IsRootRank() )
534  {
535  nCastData[0] = lpResult->nEigenValueCount;
536  nCastData[1] = lpResult->nEigenValueCountForMemeory;
537  nCastData[2] = lpResult->nMaxEigenValueFoundIteration;
538  nCastData[3] = lpResult->nEigenVectorSize;
539  }
540 
542  MPI_Bcast(nCastData, 4, MPI_INT, GetRootRank(), m_mpiCommIndex);
544 
545  if( !IsRootRank() )
546  {
547  lpResult->nEigenValueCount = nCastData[0];
548  lpResult->nEigenValueCountForMemeory = nCastData[1];
549  lpResult->nMaxEigenValueFoundIteration = nCastData[2];
550  lpResult->nEigenVectorSize = nCastData[3];
551 
553  lpResult->pEigenValueFoundIteration = (unsigned int*)malloc(sizeof(unsigned int)*lpResult->nEigenValueCount);
554  lpResult->pEigenVectors = (double**)malloc(sizeof(double*)*lpResult->nEigenValueCount);
555  for (i = 0; i < lpResult->nEigenValueCount; ++i)
556  lpResult->pEigenVectors[i] = (double*)malloc(sizeof(double)*lpResult->nEigenVectorSize);
558  }
559 
561  MPI_Bcast(lpResult->pEigenValueFoundIteration, lpResult->nEigenValueCount, MPI_INT, GetRootRank(), m_mpiCommIndex);
562  for (i = 0; i < lpResult->nEigenValueCount; ++i)
563  MPI_Bcast(lpResult->pEigenVectors[i], lpResult->nEigenVectorSize, MPI_DOUBLE, GetRootRank(), m_mpiCommIndex);
565 }
static bool IsRootRank()
Get Total node count.
Definition: MPIManager.cpp:182
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100
static int GetRootRank()
Definition: MPIManager.h:57
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
unsigned int * pEigenValueFoundIteration
Definition: LanczosMethod.h:37

Here is the call graph for this function:

Here is the caller graph for this function:

void CMPIManager::BroadcastVector ( CMatrixOperation::CVector *  pVector)
static

Broadcast a vector to sub ranks.

Parameters
pVector: Vector to broadcast
Remarks
This function is currently not used (2/Feb/2015).

Definition at line 228 of file MPIManager.cpp.

References IsInMPIRoutine().

229 {
230 #ifdef DISABLE_MPI_ROUTINE
231  if (!IsInMPIRoutine())
232  return;
233 #endif
234 }
static bool IsInMPIRoutine()
Definition: MPIManager.h:45

Here is the call graph for this function:

bool CMPIManager::CheckDeflationNodeCount ( int  nNeedNodeCount)
static

Check whether the total node count fits the deflation group count.

Parameters
nNeedNodeCount: Deflation group count
Returns
Whether a deflation group can be made

Definition at line 129 of file MPIManager.cpp.

References m_nTotalNode.

Referenced by InitLevel().

130 {
131  if( 0 == m_nTotalNode % nNeedNodeCount )
132  return true;
133  else
134  return false;
135 }
static int m_nTotalNode
Total node count.
Definition: MPIManager.h:87

Here is the caller graph for this function:
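The check is a plain divisibility test on the total node count; a small illustration with assumed numbers:

#include "MPIManager.h"

// Illustration only (rank counts assumed): with 8 total ranks,
// CheckDeflationNodeCount(4) returns true  (8 % 4 == 0),
// CheckDeflationNodeCount(3) returns false (8 % 3 != 0).
bool CanFormDeflationGroups(int nGroupCount)
{
    return CMPIManager::CheckDeflationNodeCount(nGroupCount);
}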

void CMPIManager::ExchangeCommand ( double *  pfCommand,
MPI_Comm  comm 
)
static

Exchange a command between MPI_Comm groups.

Parameters
pfCommand: Command buffer
comm: MPI_Comm over which to exchange the command

Definition at line 826 of file MPIManager.cpp.

References BroadcastDouble(), and COMMAND_SIZE.

Referenced by CLanczosMethod::MergeDegeneratedEigenvalues().

827 {
828  BroadcastDouble(pfCommand, COMMAND_SIZE, 0, comm);
829 }
static void BroadcastDouble(double *pValue, unsigned int nSize, int nRootRank=0, MPI_Comm comm=MPI_COMM_NULL)
Broadcst boolean value.
Definition: MPIManager.cpp:486
#define COMMAND_SIZE
Definition: Global.h:110

Here is the call graph for this function:

Here is the caller graph for this function:

void CMPIManager::FinalizeManager ( )
static

Finalize the MPI manager.

Release all memory.

Definition at line 617 of file MPIManager.cpp.

References FREE_MEM, CTimeMeasurement::FREE_MEM, m_bStartMPI, m_deflationComm, m_deflationGroup, m_lanczosGroup, m_mpiCommIndex, m_nCurrentRank, m_nLBCount, m_nTotalNode, m_pBankInfo, m_vectDispls, m_vectLoadBalance, m_vectRecvCount, m_vectSendCount, CTimeMeasurement::MeasurementEnd(), and CTimeMeasurement::MeasurementStart().

Referenced by CTBMS_Solver::FinalEvn(), and SPLoopMain().

618 {
619  int i, nSize;
620 
621  m_bStartMPI = false;
622  m_nCurrentRank = 0;
623  m_nTotalNode = 1;
625 
626 
627  for( i = 0 ;i < m_nLBCount ; ++ i )
628  {
633  }
634  m_vectLoadBalance.clear();
635  m_vectRecvCount.clear();
636  m_vectSendCount.clear();
637  m_vectDispls.clear();
638 
640 
642  if( MPI_GROUP_EMPTY != m_lanczosGroup )
643  MPI_Group_free(&m_lanczosGroup);
644  if( MPI_GROUP_EMPTY != m_deflationGroup )
645  MPI_Group_free(&m_deflationGroup);
646  if( MPI_COMM_NULL != m_mpiCommIndex && MPI_COMM_WORLD != m_mpiCommIndex)
647  MPI_Comm_free(&m_mpiCommIndex);
648  if( MPI_COMM_NULL != m_deflationComm )
649  MPI_Comm_free(&m_deflationComm);
650 }
static std::vector< int * > m_vectLoadBalance
Load blancing for MPI Communication.
Definition: MPIManager.h:89
static int m_nCurrentRank
MPI Rank.
Definition: MPIManager.h:85
static std::vector< int * > m_vectSendCount
Sending count variable for MPI comminication.
Definition: MPIManager.h:93
static MPI_Group m_lanczosGroup
MPI Group for Lanczos computation.
Definition: MPIManager.h:102
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: MPIManager.h:101
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100
static bool m_bStartMPI
MPI_Init call or not.
Definition: MPIManager.h:88
static int * m_pBankInfo
After MPI Split bank infomation.
Definition: MPIManager.h:94
static int m_nTotalNode
Total node count.
Definition: MPIManager.h:87
static MPI_Group m_deflationGroup
MPI Group for Deflation computation.
Definition: MPIManager.h:103
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
static int m_nLBCount
Definition: MPIManager.h:106
static std::vector< int * > m_vectRecvCount
Reciving count variable for MPI comminication.
Definition: MPIManager.h:92
static std::vector< int * > m_vectDispls
Displ for MPI comminication.
Definition: MPIManager.h:95
#define FREE_MEM(pointer)
Macro for memory allocation and assign null value.
Definition: Global.h:19

Here is the call graph for this function:

Here is the caller graph for this function:

static void CMPIManager::GatherEVFromDeflationGroup ( int  nSourceCount,
double *  pReceiveBuffer,
int *  pSourceCount,
double *  pSendBuffer,
int  nSendCount 
)
inlinestatic

Definition at line 73 of file MPIManager.h.

References GatherVDouble(), and m_deflationComm.

Referenced by CLanczosMethod::MergeDegeneratedEigenvalues().

73 { GatherVDouble(nSourceCount, pReceiveBuffer, pSourceCount, pSendBuffer, nSendCount, m_deflationComm); };
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: MPIManager.h:101
static void GatherVDouble(int nSourceCount, double *pReceiveBuffer, int *pSourceCount, double *pSendBuffer, int nSendCount, MPI_Comm comm=MPI_COMM_NULL)
GatherV for double wrapping function.
Definition: MPIManager.cpp:766

Here is the call graph for this function:

Here is the caller graph for this function:

static void CMPIManager::GatherEVIterationFromDeflationGroup ( int  nSourceCount,
int *  pReceiveBuffer,
int *  pSourceCount,
int *  pSendBuffer,
int  nSendCount 
)
inlinestatic

Gather eigenvalue-finding iteration counts from the deflation group.

Definition at line 74 of file MPIManager.h.

References GatherVInt(), and m_deflationComm.

Referenced by CLanczosMethod::MergeDegeneratedEigenvalues().

74 { GatherVInt(nSourceCount, pReceiveBuffer, pSourceCount, pSendBuffer, nSendCount, m_deflationComm); };
static void GatherVInt(int nSourceCount, int *pReceiveBuffer, int *pSourceCount, int *pSendBuffer, int nSendCount, MPI_Comm comm=MPI_COMM_NULL)
GahterV for int wrapping function.
Definition: MPIManager.cpp:798
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: MPIManager.h:101

Here is the call graph for this function:

Here is the caller graph for this function:

void CMPIManager::GatherVDouble ( int  nSourceCount,
double *  pReceiveBuffer,
int *  pSourceCount,
double *  pSendBuffer,
int  nSendCount,
MPI_Comm  comm = MPI_COMM_NULL 
)
static

MPI_Gatherv wrapper for double data.

Parameters
nSourceCount: Number of source ranks
[out] pReceiveBuffer: Receive buffer
pSourceCount: Per-source counts (ref. MPI_Gatherv)
pSendBuffer: Send buffer
nSendCount: Send count
comm: MPI_Comm over which to gather

Definition at line 766 of file MPIManager.cpp.

References CTimeMeasurement::COMM, FREE_MEM, IsDeflationRoot(), m_mpiCommIndex, CTimeMeasurement::MeasurementEnd(), and CTimeMeasurement::MeasurementStart().

Referenced by GatherEVFromDeflationGroup().

767 {
768  int *pReceiveCount = NULL;
769  int *pDisp = NULL;
770  unsigned int i;
771 
772  if( IsDeflationRoot() )
773  {
774  pDisp = (int*)malloc(sizeof(int)*nSourceCount);
775  pDisp[0] = 0;
776  for( i = 1; i < nSourceCount ; ++i)
777  pDisp[i] = pDisp[i-1] + pSourceCount[i-1];
778  }
779 
781  if( MPI_COMM_NULL == comm )
782  MPI_Gatherv(pSendBuffer, nSendCount, MPI_DOUBLE, pReceiveBuffer, pSourceCount, pDisp, MPI_DOUBLE, 0, m_mpiCommIndex);
783  else
784  MPI_Gatherv(pSendBuffer, nSendCount, MPI_DOUBLE, pReceiveBuffer, pSourceCount, pDisp, MPI_DOUBLE, 0, comm);
786 
787  FREE_MEM(pDisp);
788 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100
static bool IsDeflationRoot()
Checking is root rank of Lanczos computation.
Definition: MPIManager.h:69
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
#define FREE_MEM(pointer)
Macro for memory allocation and assign null value.
Definition: Global.h:19

Here is the call graph for this function:

Here is the caller graph for this function:

void CMPIManager::GatherVInt ( int  nSourceCount,
int *  pReceiveBuffer,
int *  pSourceCount,
int *  pSendBuffer,
int  nSendCount,
MPI_Comm  comm = MPI_COMM_NULL 
)
static

MPI_Gatherv wrapper for int data.

Parameters
nSourceCount: Number of source ranks
[out] pReceiveBuffer: Receive buffer
pSourceCount: Per-source counts (ref. MPI_Gatherv)
pSendBuffer: Send buffer
nSendCount: Send count
comm: MPI_Comm over which to gather

Definition at line 798 of file MPIManager.cpp.

References CTimeMeasurement::COMM, FREE_MEM, IsDeflationRoot(), m_mpiCommIndex, CTimeMeasurement::MeasurementEnd(), and CTimeMeasurement::MeasurementStart().

Referenced by GatherEVIterationFromDeflationGroup().

799 {
800  int *pReceiveCount = NULL;
801  int *pDisp = NULL;
802  unsigned int i;
803 
804  if( IsDeflationRoot() )
805  {
806  pDisp = (int*)malloc(sizeof(int)*nSourceCount);
807  pDisp[0] = 0;
808  for( i = 1; i < nSourceCount ; ++i)
809  pDisp[i] = pDisp[i-1] + pSourceCount[i-1];
810  }
811 
813  if( MPI_COMM_NULL == comm )
814  MPI_Gatherv(pSendBuffer, nSendCount, MPI_INT, pReceiveBuffer, pSourceCount, pDisp, MPI_INT, 0, m_mpiCommIndex);
815  else
816  MPI_Gatherv(pSendBuffer, nSendCount, MPI_INT, pReceiveBuffer, pSourceCount, pDisp, MPI_INT, 0, comm);
818 
819  FREE_MEM(pDisp);
820 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100
static bool IsDeflationRoot()
Checking is root rank of Lanczos computation.
Definition: MPIManager.h:69
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
#define FREE_MEM(pointer)
Macro for memory allocation and assign null value.
Definition: Global.h:19

Here is the call graph for this function:

Here is the caller graph for this function:

int CMPIManager::GetCurrentLoadBalanceCount ( int  nLBIndex)
static

Get the current rank's load-balancing count.

Definition at line 608 of file MPIManager.cpp.

References m_nCurrentRank, and m_vectLoadBalance.

Referenced by CLanczosMethod::BuildWaveFunction(), CLanczosMethod::CalculateEigenVector(), CLanczosMethod::DoResidualCheck(), CSPLoop::executeSPLoop(), CLanczosMethod::InitLanczosVector(), CLanczosMethod::LanczosIteration(), CLanczosMethod::LanczosIterationLoop(), CLanczosMethod::MergeDegeneratedEigenvalues(), MergeVector(), MergeVectorEx_Optimal(), MergeVectorOptimal(), CLanczosMethod::RecalcuWaveFunction(), and CLanczosMethod::SortSolution().

609 {
610 #ifdef DISABLE_MPI_ROUTINE
611  return 0;
612 #endif
613 
614  return m_vectLoadBalance[nLBIndex][m_nCurrentRank];
615 }
static std::vector< int * > m_vectLoadBalance
Load blancing for MPI Communication.
Definition: MPIManager.h:89
static int m_nCurrentRank
MPI Rank.
Definition: MPIManager.h:85

Here is the caller graph for this function:

int CMPIManager::GetCurrentRank ( MPI_Comm  comm)
static

Get Current node's rank number.

Get Current node's rank number

Parameters
comm: MPI_Comm
Returns
Current rank in 'comm' MPI_Comm

Definition at line 216 of file MPIManager.cpp.

217 {
218  int rank;
219 
220  MPI_Comm_rank(comm, &rank);
221  return rank;
222 }
static MPI_Comm CMPIManager::GetDeflationComm ( )
inlinestatic

Get the deflation computing group MPI_Comm.

Definition at line 77 of file MPIManager.h.

References m_deflationComm.

Referenced by CLanczosMethod::MergeDegeneratedEigenvalues().

77 { return m_deflationComm; };
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: MPIManager.h:101

Here is the caller graph for this function:

int * CMPIManager::GetEigenvalueCountFromDeflationGroup ( int  nDeflationGroupCount,
int  nLocalEVCount 
)
static

Collect the total eigenvalue counts from all deflation groups.

Parameters
nDeflationGroupCount: Number of deflation groups
nLocalEVCount: Local deflation group eigenvalue count
Returns
Merged eigenvalue counts

Definition at line 744 of file MPIManager.cpp.

References CTimeMeasurement::COMM, IsRootRank(), m_deflationComm, CTimeMeasurement::MeasurementEnd(), and CTimeMeasurement::MeasurementStart().

Referenced by CLanczosMethod::MergeDegeneratedEigenvalues().

745 {
746  int *pEVCount = NULL;
747 
749  pEVCount = (int*)malloc(sizeof(int)*nDeflationGroupCount);
750 
752  MPI_Gather(&nLocalEVCount, 1, MPI_INT, pEVCount, 1, MPI_INT, 0, m_deflationComm);
754 
755  return pEVCount;
756 }
static bool IsRootRank()
Get Total node count.
Definition: MPIManager.cpp:182
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: MPIManager.h:101
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.

Here is the call graph for this function:

Here is the caller graph for this function:
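A hedged sketch of how this call can be combined with GatherEVFromDeflationGroup on the deflation communicator; the group count and the local results are assumed inputs, and memory handling is abbreviated.

#include <cstdlib>
#include "MPIManager.h"

// Illustrative only: gather every deflation group's eigenvalues on the
// deflation-group root, using the per-group counts collected first.
void CollectGroupEigenvalues(int nGroups, double *pLocalEV, int nLocalEV)
{
    // One count per deflation group; the gathered values are valid on the root.
    int *pCounts = CMPIManager::GetEigenvalueCountFromDeflationGroup(nGroups, nLocalEV);

    double *pAllEV = NULL;
    if (CMPIManager::IsDeflationRoot())
    {
        int nTotal = 0;
        for (int i = 0; i < nGroups; ++i)
            nTotal += pCounts[i];
        pAllEV = (double*)malloc(sizeof(double) * nTotal);
    }

    // Variable-count gather onto the deflation root with matching counts.
    CMPIManager::GatherEVFromDeflationGroup(nGroups, pAllEV, pCounts, pLocalEV, nLocalEV);

    // ... use pAllEV on the root ...
    free(pAllEV);
    free(pCounts);
}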

static MPI_Comm CMPIManager::GetLanczosComputComm ( )
inlinestatic

Definition at line 76 of file MPIManager.h.

References m_mpiCommIndex.

Referenced by CLanczosMethod::MergeDegeneratedEigenvalues().

76 { return m_mpiCommIndex; };
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100

Here is the caller graph for this function:

static unsigned int CMPIManager::GetLanczosGroupIndex ( )
inlinestatic

Definition at line 80 of file MPIManager.h.

References m_nLanczosGroupIndex.

Referenced by CLanczosMethod::LanczosIteration(), and CLanczosMethod::MergeDegeneratedEigenvalues().

80 { return m_nLanczosGroupIndex; };
static unsigned int m_nLanczosGroupIndex
MPI Group index for Lanczos group.
Definition: MPIManager.h:104

Here is the caller graph for this function:

int CMPIManager::GetLoadBalanceCount ( int  nRank,
int  nLBIndex 
)
static
Parameters
nRank: Target rank index

Definition at line 168 of file MPIManager.cpp.

References m_nTotalNode, and m_vectLoadBalance.

Referenced by CMatrixOperation::AllocateLocalCSR(), InitCommunicationBufferMetric(), MergeVectorEx_Optimal(), and MergeVectorOptimal().

169 {
170 #ifdef DISABLE_MPI_ROUTINE
171  return 0;
172 #endif
173  if (nRank > m_nTotalNode)
174  return 0;
175 
176  return m_vectLoadBalance[nLBIndex][nRank];
177 }
static std::vector< int * > m_vectLoadBalance
Load blancing for MPI Communication.
Definition: MPIManager.h:89
static int m_nTotalNode
Total node count.
Definition: MPIManager.h:87

Here is the caller graph for this function:

static MPI_Comm CMPIManager::GetMPIComm ( )
inlinestatic

Definition at line 64 of file MPIManager.h.

References m_mpiCommIndex.

Referenced by CMatrixOperation::AllocateLocalCSR(), and CLanczosMethod::LanczosIterationLoop().

64 { return m_mpiCommIndex; };
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100

Here is the caller graph for this function:

static int CMPIManager::GetRootRank ( )
inlinestatic

Definition at line 57 of file MPIManager.h.

Referenced by BroadcastLanczosResult().

57 { return 0;};

Here is the caller graph for this function:

int CMPIManager::InitCommunicationBufferMetric ( )
static

Initialize the MPI communication buffers (receive counts, send counts, and displacements) used for MVMul.

Returns
The load-balance index assigned to the new buffer set

Definition at line 655 of file MPIManager.cpp.

References GetLoadBalanceCount(), GetTotalNodeCount(), m_nLBCount, m_vectDispls, m_vectRecvCount, m_vectSendCount, CTimeMeasurement::MALLOC, CTimeMeasurement::MeasurementEnd(), and CTimeMeasurement::MeasurementStart().

Referenced by CTBMS_Solver::AllocateCSR(), CSPLoop::AllocateCSR(), and CLanczosLaunching::LaunchingLanczos().

656 {
657  unsigned int i;
658  int *pRecvCount = (int*)malloc(sizeof(int)*GetTotalNodeCount());
659  int *pSendCount = (int*)malloc(sizeof(int)*GetTotalNodeCount());
660  int *pDispls = (int*)malloc(sizeof(int)*GetTotalNodeCount());
661 
663  pDispls[0] = 0;
664  pRecvCount[0] = GetLoadBalanceCount(0, m_nLBCount);
665  for (i = 1; i < (unsigned int)GetTotalNodeCount(); i++)
666  {
667  pRecvCount[i] = GetLoadBalanceCount(i, m_nLBCount);
668  pDispls[i] = pDispls[i - 1] + GetLoadBalanceCount(i - 1, m_nLBCount);
669  }
671 
672  m_vectRecvCount.push_back(pRecvCount);
673  m_vectSendCount.push_back(pSendCount);
674  m_vectDispls.push_back(pDispls);
675 
676  return m_nLBCount++;
677 }
static std::vector< int * > m_vectSendCount
Sending count variable for MPI comminication.
Definition: MPIManager.h:93
static int GetLoadBalanceCount(int nRank, int nLBIndex)
Definition: MPIManager.cpp:168
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static int GetTotalNodeCount()
Definition: MPIManager.h:42
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
static int m_nLBCount
Definition: MPIManager.h:106
static std::vector< int * > m_vectRecvCount
Reciving count variable for MPI comminication.
Definition: MPIManager.h:92
static std::vector< int * > m_vectDispls
Displ for MPI comminication.
Definition: MPIManager.h:95

Here is the call graph for this function:

Here is the caller graph for this function:

bool CMPIManager::InitLevel ( int  nMPILevel,
int  nFindingDegeneratedEVCount 
)
static

Initialize the MPI levels; the lowest level is used for multi-node Lanczos calculation.

Parameters
nMPILevel: MPI level count
nFindingDegeneratedEVCount: Deflation group count
Returns
Whether splitting the MPI group succeeded

First, make the group for the Lanczos method.

Second, make the group for deflation Lanczos (the vertically connected group).

Definition at line 53 of file MPIManager.cpp.

References CheckDeflationNodeCount(), FREE_MEM, GetCurrentRank(), GetTotalNodeCount(), IsDeflationRoot(), IsLanczosComputeRoot(), m_bMultiLevel, m_bNeedPostOperation, m_deflationComm, m_deflationGroup, m_lanczosGroup, m_mpiCommIndex, m_nCommWorldRank, m_nLanczosGroupIndex, SetMPIEnviroment(), and CUtility::SetShow().

Referenced by InitMPIEnv(), and CLanczosLaunching::LaunchingLanczos().

54 {
55  bool bRtn = true;
56  int nNeedNodeCount = 1;
57  int world_size, rank;
58  int nPerGroupNode;
59  int nLanczosGroupIndex;
60  int *pNewGroupRank = NULL;
61  unsigned int i;
62  MPI_Group commWorldGroup;
63 
64 
65  if( 1 == nMPILevel )
66  {
67  m_mpiCommIndex = MPI_COMM_WORLD;
68  return bRtn;
69  }
70 
71  if( nFindingDegeneratedEVCount > 1 )
72  {
73  m_bNeedPostOperation[1] = true;
74  nNeedNodeCount *= nFindingDegeneratedEVCount;
75  nMPILevel--;
76  }
77 
79  if( nMPILevel == 1 )
80  {
81  bRtn = CheckDeflationNodeCount(nNeedNodeCount);
82  if( !bRtn )
83  return bRtn;
84  }
85 
87  nPerGroupNode = GetTotalNodeCount() / nNeedNodeCount;
88  nLanczosGroupIndex = GetCurrentRank() / nPerGroupNode;
89  pNewGroupRank = (int*)malloc(sizeof(int)*nPerGroupNode);
90 
92  for( i = 0; i < nPerGroupNode ; ++i)
93  pNewGroupRank [i] = nLanczosGroupIndex * nPerGroupNode + i;
94 
95  MPI_Comm_group(MPI_COMM_WORLD,&commWorldGroup);
96  MPI_Group_incl(commWorldGroup,nPerGroupNode,pNewGroupRank ,&m_lanczosGroup);
97  MPI_Comm_create(MPI_COMM_WORLD,m_lanczosGroup,&m_mpiCommIndex);
98  MPI_Comm_size(m_mpiCommIndex, &world_size);
99  MPI_Comm_rank(m_mpiCommIndex, &rank);
100  SetMPIEnviroment(rank, world_size);
101  m_nLanczosGroupIndex = nLanczosGroupIndex;
102 
104  pNewGroupRank = (int*)realloc(pNewGroupRank , sizeof(int)*nFindingDegeneratedEVCount);
105  for( i = 0; i < nFindingDegeneratedEVCount ; ++i)
106  pNewGroupRank [i] = i * nPerGroupNode + GetCurrentRank();
107 
108  MPI_Comm_group(MPI_COMM_WORLD,&commWorldGroup);
109  MPI_Group_incl(commWorldGroup,nFindingDegeneratedEVCount,pNewGroupRank ,&m_deflationGroup);
110  MPI_Comm_create(MPI_COMM_WORLD,m_deflationGroup,&m_deflationComm);
111  MPI_Comm_rank(m_deflationComm, &rank);
112  MPI_Comm_size(m_mpiCommIndex, &world_size);
113 
114  m_bMultiLevel = true;
115 
116  FREE_MEM(pNewGroupRank );
117 
119  CUtility::SetShow(false);
120 
121  bRtn = true;
122  return bRtn;
123 }
static MPI_Group m_lanczosGroup
MPI Group for Lanczos computation.
Definition: MPIManager.h:102
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: MPIManager.h:101
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100
static void SetMPIEnviroment(int nRank, int nTotalNode)
Set MPI Enviroment.
Definition: MPIManager.cpp:141
static int GetTotalNodeCount()
Definition: MPIManager.h:42
static bool IsDeflationRoot()
Checking is root rank of Lanczos computation.
Definition: MPIManager.h:69
static int GetCurrentRank()
Definition: MPIManager.h:40
static int m_nCommWorldRank
MPI Rank before split.
Definition: MPIManager.h:86
static MPI_Group m_deflationGroup
MPI Group for Deflation computation.
Definition: MPIManager.h:103
static bool m_bNeedPostOperation[10]
MPI Level.
Definition: MPIManager.h:99
static bool IsLanczosComputeRoot()
Barrier current deflation group.
Definition: MPIManager.h:68
static void SetShow(bool bShow)
Definition: Utility.h:29
static unsigned int m_nLanczosGroupIndex
MPI Group index for Lanczos group.
Definition: MPIManager.h:104
static bool CheckDeflationNodeCount(int nNeedNodeCount)
Checking node counts fix to deflation group.
Definition: MPIManager.cpp:129
static bool m_bMultiLevel
Flag for Multilevel MPI group.
Definition: MPIManager.h:105
#define FREE_MEM(pointer)
Macro for memory allocation and assign null value.
Definition: Global.h:19

Here is the call graph for this function:

Here is the caller graph for this function:
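A worked illustration of the split with assumed numbers: with 8 ranks, nMPILevel = 2 and nFindingDegeneratedEVCount = 4, nPerGroupNode becomes 8 / 4 = 2, so the Lanczos compute groups are {0,1}, {2,3}, {4,5}, {6,7}, and the deflation communicators connect corresponding ranks across groups, e.g. {0,2,4,6} and {1,3,5,7}. A minimal call sketch:

#include "MPIManager.h"

// Hypothetical setup for the 8-rank example above (values assumed).
void SetupTwoLevelMPI()
{
    // Splits MPI_COMM_WORLD into 4 Lanczos compute groups of 2 ranks each,
    // plus deflation communicators linking the groups.
    if (!CMPIManager::InitLevel(2, 4))
        return;   // total node count not divisible by the group count

    unsigned int nGroup    = CMPIManager::GetLanczosGroupIndex();   // 0..3
    MPI_Comm lanczosComm   = CMPIManager::GetLanczosComputComm();
    MPI_Comm deflationComm = CMPIManager::GetDeflationComm();
    (void)nGroup; (void)lanczosComm; (void)deflationComm;
}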

bool CMPIManager::InitMPIEnv ( bool &  bMPI,
CCommandFileParser::LPINPUT_CMD_PARAM  lpParam 
)
static

Initialization of the MPI environment.

Parameters
bMPI: Whether to run in an MPI environment
lpParam: Option parameters for program launching

Definition at line 875 of file MPIManager.cpp.

References InitLevel(), CCommandFileParser::INPUT_CMD_PARAM::nFindingDegeneratedEVCount, CCommandFileParser::INPUT_CMD_PARAM::nMPILevel, and SetMPIEnviroment().

Referenced by InitEnvironment(), and CTBMS_Solver::Launching_TBMS_Solver().

876 {
877  int rank;
878  int world_size;
879  bool bRtn = true;
880 
881  if (!bMPI)
882  return bRtn;
883 
884  MPI_Init(NULL, NULL);
885  MPI_Comm_size(MPI_COMM_WORLD, &world_size);
886  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
887 
888  CMPIManager::SetMPIEnviroment(rank, world_size);
890 
891  return bRtn;
892 }
static void SetMPIEnviroment(int nRank, int nTotalNode)
Set MPI Enviroment.
Definition: MPIManager.cpp:141
unsigned int nFindingDegeneratedEVCount
Added by jhkang>
static bool InitLevel(int nMPILevel, int nFindingDegeneratedEVCount)
Init MPI Level, most low level is for multi node cacluation for Lanczos.
Definition: MPIManager.cpp:53
unsigned int nMPILevel
MPI Grouping level.

Here is the call graph for this function:

Here is the caller graph for this function:
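A minimal program skeleton showing the expected bracketing (the solver body and the final MPI_Finalize call are assumptions, not taken from the source):

#include "MPIManager.h"

// Hypothetical launch flow: initialize MPI, run the solver, clean up.
int RunSolver(CCommandFileParser::LPINPUT_CMD_PARAM lpParam)
{
    bool bMPI = true;
    if (!CMPIManager::InitMPIEnv(bMPI, lpParam))
        return -1;

    // ... construct the geometry/Hamiltonian and run the Lanczos solver ...

    CMPIManager::FinalizeManager();   // frees load-balance buffers and communicators
    MPI_Finalize();                   // assumed to be the caller's responsibility
    return 0;
}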

static bool CMPIManager::IsDeflationRoot ( )
inlinestatic

Check whether this rank is the root of the deflation group.

Definition at line 69 of file MPIManager.h.

References IsRootRank(), and m_deflationComm.

Referenced by CGeometricShape::ConstructMapInfo(), CUtility::DumpCSR(), CSPLoop::DumpSolution(), GatherVDouble(), GatherVInt(), InitLevel(), CTBMS_Solver::Launching_TBMS_Solver(), CLanczosMethod::MergeDegeneratedEigenvalues(), CLanczosMethod::RecalcuWaveFunction(), CLanczosMethod::SaveLanczosResult(), CLanczosMethod::ShowLanczosResult(), CLanczosMethod::ShowLanczosWorkingTime(), and CSPLoop::SolveSchroedinger().

69 { return IsRootRank(m_deflationComm); };
static bool IsRootRank()
Get Total node count.
Definition: MPIManager.cpp:182
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: MPIManager.h:101

Here is the call graph for this function:

Here is the caller graph for this function:

static bool CMPIManager::IsInMPIRoutine ( )
inlinestatic

Definition at line 45 of file MPIManager.h.

References m_bStartMPI.

Referenced by BroadcastVector().

45 { return m_bStartMPI; };
static bool m_bStartMPI
MPI_Init call or not.
Definition: MPIManager.h:88

Here is the caller graph for this function:

static bool CMPIManager::IsLanczosComputeRoot ( )
inlinestatic

Check whether this rank is the root of the Lanczos compute group.

Definition at line 68 of file MPIManager.h.

References IsRootRank(), and m_mpiCommIndex.

Referenced by InitLevel(), and CLanczosMethod::MergeDegeneratedEigenvalues().

68 { return IsRootRank(m_mpiCommIndex);};
static bool IsRootRank()
Get Total node count.
Definition: MPIManager.cpp:182
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100

Here is the call graph for this function:

Here is the caller graph for this function:

static bool CMPIManager::IsMultiLevelMPI ( )
inlinestatic

Check whether multilevel MPI is configured.

Definition at line 65 of file MPIManager.h.

References m_bMultiLevel.

Referenced by CLanczosMethod::LanczosIteration(), CTBMS_Solver::Launching_TBMS_Solver(), CLanczosLaunching::LaunchingLanczos(), and CSPLoop::SolveSchroedinger().

65 { return m_bMultiLevel; };
static bool m_bMultiLevel
Flag for Multilevel MPI group.
Definition: MPIManager.h:105

Here is the caller graph for this function:

bool CMPIManager::IsRootRank ( )
static

Check whether this node is the root rank.

Returns
Root rank or not

Definition at line 182 of file MPIManager.cpp.

References GetCurrentRank().

Referenced by CTBMS_Solver::ApplyPhPotential(), BroadcastLanczosResult(), CSPLoop::BuildHamiltonian(), CGeometricShape::BuildPEBiasVector(), CGeometricShape::BuildPEHamiltonian(), CGeometricShape::BuildPEWaveVector(), CGeometricShape::CalculateUnitcellCount(), CGeometricShape::ConstructContactRegionOnPoissonGrid(), CSPLoop::ConstructionGeometric(), CGeometricShape::ConstructMapInfo(), CLanczosMethod::DoEigenValueSolving(), CLanczosMethod::DoResidualCheck(), CUtility::DumpCSR(), CSPLoop::DumpSolution(), CGeometricShape::ExchangeAtomInfoBetweenNode(), CSPLoop::executeSPLoop(), CLanczosMethod::FinalizeLanczosInterationVariable(), GetEigenvalueCountFromDeflationGroup(), CGeometricShape::GetPeriodicDirection(), InitEnvironment(), CLanczosMethod::InitLanczosIterationVariables(), IsDeflationRoot(), CGeometricShape::IsInBoundaryCondition(), IsLanczosComputeRoot(), CLanczosMethod::LanczosIteration(), CLanczosMethod::LanczosIterationLoop(), CTBMS_Solver::Launching_TBMS_Solver(), CLanczosLaunching::LaunchingLanczos(), main(), CGeometricShape::MapElecAtomOnPoissonGrid(), CSPLoop::MapZBandCB(), CLanczosMethod::MergeDegeneratedEigenvalues(), CGeometricShape::PeriodicUnitCellNumbering(), CLanczosMethod::SaveLanczosResult(), CSPLoop::SetInitialPotential(), CGeometricShape::SetShapeInformation(), CGeometricShape::SetupPEBoundaryCondition(), CLanczosMethod::ShowLanczosResult(), CLanczosMethod::ShowLanczosWorkingTime(), CSPLoop::SolvePoisson(), CSPLoop::SolveSchroedinger(), and CLanczosMethod::SortSolution().

183 {
184  bool bRtn = true;
185 
186 #ifdef DISABLE_MPI_ROUTINE
187  return bRtn;
188 #endif
189 
190  if (0 == GetCurrentRank())
191  return bRtn;
192 
193  bRtn = false;
194  return bRtn;
195 }
static int GetCurrentRank()
Definition: MPIManager.h:40

Here is the call graph for this function:

Here is the caller graph for this function:

bool CMPIManager::IsRootRank ( MPI_Comm  comm)
static

Check this node is root rank in 'comm' MPI_Comm.

Parameters
comm: MPI_Comm
Returns
Root rank or not in 'comm' MPI_Comm

Definition at line 201 of file MPIManager.cpp.

References GetCurrentRank().

202 {
203  if (MPI_COMM_NULL == comm)
204  return true;
205 
206  if( 0 == GetCurrentRank(comm) )
207  return true;
208  else
209  return false;
210 }
static int GetCurrentRank()
Definition: MPIManager.h:40

Here is the call graph for this function:

void CMPIManager::LoadBlancing ( int  nElementCount)
static

Load balancing for MPI.

Parameters
nElementCount: This rank's element count for load balancing
Remarks
This function is for Lanczos solving with geometric construction.

Definition at line 152 of file MPIManager.cpp.

References m_mpiCommIndex, m_nLBCount, m_nTotalNode, and m_vectLoadBalance.

Referenced by CTBMS_Solver::AllocateCSR(), and CSPLoop::AllocateCSR().

153 {
154  int *pLoadBalance = (int *)malloc(sizeof(int)*(m_nTotalNode)); // For communication size
155 
156  m_vectLoadBalance.push_back(pLoadBalance);
157 
158 #ifdef DISABLE_MPI_ROUTINE
159  return;
160 #endif
161 
162  MPI_Allgather(&nElementCount, 1, MPI_INTEGER, m_vectLoadBalance[m_nLBCount], 1, MPI_INTEGER, m_mpiCommIndex);
163 }
static std::vector< int * > m_vectLoadBalance
Load blancing for MPI Communication.
Definition: MPIManager.h:89
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100
static int m_nTotalNode
Total node count.
Definition: MPIManager.h:87
static int m_nLBCount
Definition: MPIManager.h:106

Here is the caller graph for this function:
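A hedged sketch of the load-balancing setup implied by the callers above (AllocateCSR): register this rank's element count, then build the matching communication buffers; the element count is an assumed input.

#include "MPIManager.h"

// Illustrative only: all-gather the per-rank element counts and create the
// recv/send/displacement buffers for that load-balance slot.
int RegisterLoadBalance(int nLocalElementCount)
{
    CMPIManager::LoadBlancing(nLocalElementCount);
    int nLBIndex = CMPIManager::InitCommunicationBufferMetric();

    // Every rank can now query its own share for this index.
    int nMine = CMPIManager::GetCurrentLoadBalanceCount(nLBIndex);
    (void)nMine;
    return nLBIndex;
}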

void CMPIManager::MergeVector ( CMatrixOperation::CVector *  pVector,
CMatrixOperation::CVector *  pResultVector,
unsigned int  nMergeSize,
int  nLBIndex 
)
static

Merge a vector across sub ranks (all-gather).

Parameters
pVector: Vector to share
[out] pResultVector: Vector receiving the merged result
nMergeSize: Vector size after merging

Definition at line 241 of file MPIManager.cpp.

References FREE_MEM, GetCurrentLoadBalanceCount(), m_mpiCommIndex, m_vectDispls, m_vectRecvCount, CMatrixOperation::CVector::m_vectValueImaginaryBuffer, CMatrixOperation::CVector::m_vectValueRealBuffer, CTimeMeasurement::MeasurementEnd(), CTimeMeasurement::MeasurementStart(), and CTimeMeasurement::MV_COMM.

Referenced by CMatrixOperation::MVMul().

242 {
243  LPCOMPLEX_NUMBER lpSendBuffer = NULL;
244  unsigned int i;
245 
246 #ifdef DISABLE_MPI_ROUTINE
247  return;
248 #endif
249 
250 
251  double *pBuffer = (double*)malloc(sizeof(double)*nMergeSize);
253  MPI_Allgatherv(pVector->m_vectValueRealBuffer.data(), GetCurrentLoadBalanceCount(nLBIndex), MPI_DOUBLE,
254  pBuffer, m_vectRecvCount[nLBIndex], m_vectDispls[nLBIndex], MPI_DOUBLE, m_mpiCommIndex);
255 
256  for (i = 0; i < nMergeSize; i++)
257  pResultVector->m_vectValueRealBuffer[i] = pBuffer[i];
258 
259  MPI_Allgatherv(pVector->m_vectValueImaginaryBuffer.data(), GetCurrentLoadBalanceCount(nLBIndex), MPI_DOUBLE,
260  pBuffer, m_vectRecvCount[nLBIndex], m_vectDispls[nLBIndex], MPI_DOUBLE, m_mpiCommIndex);
261 
262  for (i = 0; i < nMergeSize; i++)
263  pResultVector->m_vectValueImaginaryBuffer[i] = pBuffer[i];
265 
266  FREE_MEM(pBuffer);
267 
268 
269 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
double_vector_t m_vectValueRealBuffer
A member variable for saving none zero elements.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: MPIManager.h:100
static int GetCurrentLoadBalanceCount(int nLBIndex)
Get Current node's rank load balancing number.
Definition: MPIManager.cpp:608
double_vector_t m_vectValueImaginaryBuffer
A member variable for saving none zero elements.
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
static std::vector< int * > m_vectRecvCount
Reciving count variable for MPI comminication.
Definition: MPIManager.h:92
struct CMPIManager::COMPLEX_NUMBER * LPCOMPLEX_NUMBER
static std::vector< int * > m_vectDispls
Displ for MPI comminication.
Definition: MPIManager.h:95
#define FREE_MEM(pointer)
Macro for memory allocation and assign null value.
Definition: Global.h:19

Here is the call graph for this function:

Here is the caller graph for this function:

void CMPIManager::MergeVector ( CMatrixOperation::CVector *  pVector,
unsigned int  nMergeSize,
int  nLBIndex 
)
static

Merge a vector across sub ranks (all-gather in place).

Parameters
[in,out] pVector: Vector to share and merge
nMergeSize: Vector size after merging

Modified by jhkang; the previous version had an error.

Definition at line 275 of file MPIManager.cpp.

References CTimeMeasurement::COMM, FREE_MEM, GetCurrentLoadBalanceCount(), m_vectDispls, m_vectRecvCount, CMatrixOperation::CVector::m_vectValueImaginaryBuffer, CMatrixOperation::CVector::m_vectValueRealBuffer, CTimeMeasurement::MeasurementEnd(), CTimeMeasurement::MeasurementStart(), and CMatrixOperation::CVector::SetSize().

276 {
277  LPCOMPLEX_NUMBER lpSendBuffer = NULL;
278  unsigned int i;
279 
280 #ifdef DISABLE_MPI_ROUTINE
281  return;
282 #endif
283 
285  double *pRealBuffer = (double*)malloc(sizeof(double)*nMergeSize);
286  double *pImagBuffer = (double*)malloc(sizeof(double)*nMergeSize);
287 
289 
290  MPI_Allgatherv(pVector->m_vectValueRealBuffer.data(), GetCurrentLoadBalanceCount(nLBIndex), MPI_DOUBLE,
291  pRealBuffer, m_vectRecvCount[nLBIndex], m_vectDispls[nLBIndex], MPI_DOUBLE, MPI_COMM_WORLD);
292  MPI_Allgatherv(pVector->m_vectValueImaginaryBuffer.data(), GetCurrentLoadBalanceCount(nLBIndex), MPI_DOUBLE,
293  pImagBuffer, m_vectRecvCount[nLBIndex], m_vectDispls[nLBIndex], MPI_DOUBLE, MPI_COMM_WORLD);
295 
296  pVector->SetSize(nMergeSize);
297 
298  for (i = 0; i < nMergeSize; i++)
299  {
300  pVector->m_vectValueRealBuffer[i] = pRealBuffer[i];
301  pVector->m_vectValueImaginaryBuffer[i] = pImagBuffer[i];
302  }
303  FREE_MEM(pRealBuffer);
304  FREE_MEM(pImagBuffer);
305 
307 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
double_vector_t m_vectValueRealBuffer
A member variable for saving none zero elements.
static int GetCurrentLoadBalanceCount(int nLBIndex)
Get Current node's rank load balancing number.
Definition: MPIManager.cpp:608
double_vector_t m_vectValueImaginaryBuffer
A member variable for saving none zero elements.
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
static std::vector< int * > m_vectRecvCount
Reciving count variable for MPI comminication.
Definition: MPIManager.h:92
struct CMPIManager::COMPLEX_NUMBER * LPCOMPLEX_NUMBER
static std::vector< int * > m_vectDispls
Displ for MPI comminication.
Definition: MPIManager.h:95
#define FREE_MEM(pointer)
Macro for memory allocation and assign null value.
Definition: Global.h:19
void SetSize(unsigned int nSize)
Set Vector elements size.

Here is the call graph for this function:

void CMPIManager::MergeVectorEx_Optimal ( CMatrixOperation::CVector *  pSrcVector,
CMatrixOperation::CVector *  pResultVector,
unsigned int  nMergeSize,
double  fFirstIndex,
unsigned int  nSizeFromPrevRank,
unsigned int  nSizeFromNextRank,
unsigned int  nSizetoPrevRank,
unsigned int  nSizetoNextRank,
unsigned int *  mPos,
int  nLBIndex 
)
static

Merge vectors with one-layer (nearest-neighbor) exchange.

Parameters
pSrcVector: Vector to share
[out] pResultVector: Vector receiving the merged result
nMergeSize: Vector size after merging
fFirstIndex: First index of the local vector
nSizeFromPrevRank: Exchange size from the previous node
nSizeFromNextRank: Exchange size from the next node
nSizetoPrevRank: Exchange size to the previous node
nSizetoNextRank: Exchange size to the next node
mPos: Start indices for previous, local, and next node data

Definition at line 320 of file MPIManager.cpp.

References GetCurrentLoadBalanceCount(), GetLoadBalanceCount(), m_mpiCommIndex, m_nCurrentRank, m_nTotalNode, m_vectDispls, m_vectRecvCount, CMatrixOperation::CVector::m_vectValueImaginaryBuffer, CMatrixOperation::CVector::m_vectValueRealBuffer, CTimeMeasurement::MeasurementEnd(), CTimeMeasurement::MeasurementStart(), and CTimeMeasurement::MV_COMM.

Referenced by CMatrixOperation::MVMulEx_Optimal().

321 {
322  if (m_nTotalNode <= 3)
323  {
325  MPI_Allgatherv(pSrcVector->m_vectValueRealBuffer.data(), GetCurrentLoadBalanceCount(nLBIndex), MPI_DOUBLE, pResultVector->m_vectValueRealBuffer.data(), m_vectRecvCount[nLBIndex], m_vectDispls[nLBIndex], MPI_DOUBLE, m_mpiCommIndex);
326  MPI_Allgatherv(pSrcVector->m_vectValueImaginaryBuffer.data(), GetCurrentLoadBalanceCount(nLBIndex), MPI_DOUBLE, pResultVector->m_vectValueImaginaryBuffer.data(), m_vectRecvCount[nLBIndex], m_vectDispls[nLBIndex], MPI_DOUBLE, m_mpiCommIndex);
328  mPos[0] = -1; mPos[1] = -1; mPos[2] = -1;
329  }
330  else
331  {
332  long long fCurrentRankPos = fFirstIndex;
333  int nPrevRank = (m_nCurrentRank - 1 + m_nTotalNode) % m_nTotalNode;
334  int nNextRank = (m_nCurrentRank + 1) % m_nTotalNode;
335  long long fPrevRankPos = -1, fNextRankPos = -1;
336  double *pSendBuffer = NULL, *pRecvBuffer = NULL;
337  MPI_Request req[2];
338  MPI_Status status[2];
339 
340  if (0 == m_nCurrentRank)
341  fPrevRankPos = nMergeSize - nSizeFromPrevRank;
342  else
343  fPrevRankPos = fFirstIndex - nSizeFromPrevRank;
344 
345  if (m_nCurrentRank == m_nTotalNode - 1)
346  fNextRankPos = 0;
347  else
348  fNextRankPos = fFirstIndex + GetLoadBalanceCount(m_nCurrentRank, nLBIndex);
349 
350  mPos[0] = (unsigned int)fPrevRankPos; mPos[1] = (unsigned int)fCurrentRankPos; mPos[2] = (unsigned int)fNextRankPos;
351 
352  //printf("Rank %d: myload=%d, nSizeFromPrevRank=%d, nSizeFromNextRank=%d, nSizetoPrevRank=%d, nSizetoNextRank=%d, fPrevRankPos=%d, fNextRankPos=%d\n", m_nCurrentRank, GetLoadBalanceCount(m_nCurrentRank), nSizeFromPrevRank, nSizeFromNextRank, nSizetoPrevRank, nSizetoNextRank, fPrevRankPos, fNextRankPos);
353 
354  pSendBuffer = pSrcVector->m_vectValueRealBuffer.data();
355  pRecvBuffer = pResultVector->m_vectValueRealBuffer.data();
356 
358  MPI_Irecv(pRecvBuffer + fPrevRankPos, nSizeFromPrevRank, MPI_DOUBLE, nPrevRank, m_nCurrentRank, m_mpiCommIndex, &req[0]);
359  MPI_Isend(pSendBuffer + GetLoadBalanceCount(m_nCurrentRank, nLBIndex) - nSizetoNextRank, nSizetoNextRank, MPI_DOUBLE, nNextRank, nNextRank, m_mpiCommIndex, &req[1]);
360  MPI_Waitall(2, req, status);
361 
362  MPI_Irecv(pRecvBuffer + fNextRankPos, nSizeFromNextRank, MPI_DOUBLE, nNextRank, m_nCurrentRank, m_mpiCommIndex, &req[0]);
363  MPI_Isend(pSendBuffer, nSizetoPrevRank, MPI_DOUBLE, nPrevRank, nPrevRank, m_mpiCommIndex, &req[1]);
364  MPI_Waitall(2, req, status);
366 
367  pSendBuffer = pSrcVector->m_vectValueImaginaryBuffer.data();
368  pRecvBuffer = pResultVector->m_vectValueImaginaryBuffer.data();
369 
371  MPI_Irecv(pRecvBuffer + fPrevRankPos, nSizeFromPrevRank, MPI_DOUBLE, nPrevRank, m_nCurrentRank, m_mpiCommIndex, &req[0]);
372  MPI_Isend(pSendBuffer + GetLoadBalanceCount(m_nCurrentRank, nLBIndex) - nSizetoNextRank, nSizetoNextRank, MPI_DOUBLE, nNextRank, nNextRank, m_mpiCommIndex, &req[1]);
373  MPI_Waitall(2, req, status);
374 
375  MPI_Irecv(pRecvBuffer + fNextRankPos, nSizeFromNextRank, MPI_DOUBLE, nNextRank, m_nCurrentRank, m_mpiCommIndex, &req[0]);
376  MPI_Isend(pSendBuffer, nSizetoPrevRank, MPI_DOUBLE, nPrevRank, nPrevRank, m_mpiCommIndex, &req[1]);
377  MPI_Waitall(2, req, status);
379 
380  memcpy(pResultVector->m_vectValueRealBuffer.data() + (long long)fFirstIndex, pSrcVector->m_vectValueRealBuffer.data(), GetLoadBalanceCount(m_nCurrentRank, nLBIndex) * sizeof(double));
381  memcpy(pResultVector->m_vectValueImaginaryBuffer.data() + (long long)fFirstIndex, pSrcVector->m_vectValueImaginaryBuffer.data(), GetLoadBalanceCount(m_nCurrentRank, nLBIndex) * sizeof(double));
382 
383  }
384 }

Here is the call graph for this function:

Here is the caller graph for this function:
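The listing above performs a one-layer ring exchange: each rank posts a nonblocking receive for the slice arriving from its previous neighbour while sending its own trailing slice to the next neighbour, then repeats the pattern in the opposite direction before copying its own block into place. Below is a minimal, self-contained sketch of that pattern in plain MPI; the buffer names and sizes are illustrative only and are not part of the CMPIManager API.

#include <mpi.h>
#include <vector>
#include <cstdio>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0, size = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int nLocal = 8;   // local block owned by this rank (illustrative)
    const int nHalo  = 2;   // one-layer overlap exchanged with each neighbour

    std::vector<double> local(nLocal, static_cast<double>(rank));
    std::vector<double> fromPrev(nHalo, 0.0), fromNext(nHalo, 0.0);

    const int prev = (rank - 1 + size) % size;
    const int next = (rank + 1) % size;

    MPI_Request req[2];
    MPI_Status  status[2];

    // Phase 1: receive the previous rank's trailing slice, send ours forward.
    // The tag convention mirrors the listing: tag == receiver's rank.
    MPI_Irecv(fromPrev.data(), nHalo, MPI_DOUBLE, prev, rank, MPI_COMM_WORLD, &req[0]);
    MPI_Isend(local.data() + nLocal - nHalo, nHalo, MPI_DOUBLE, next, next, MPI_COMM_WORLD, &req[1]);
    MPI_Waitall(2, req, status);

    // Phase 2: receive the next rank's leading slice, send ours backward.
    MPI_Irecv(fromNext.data(), nHalo, MPI_DOUBLE, next, rank, MPI_COMM_WORLD, &req[0]);
    MPI_Isend(local.data(), nHalo, MPI_DOUBLE, prev, prev, MPI_COMM_WORLD, &req[1]);
    MPI_Waitall(2, req, status);

    printf("rank %d received %.0f from prev and %.0f from next\n", rank, fromPrev[0], fromNext[0]);

    MPI_Finalize();
    return 0;
}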

void CMPIManager::MergeVectorOptimal ( CMatrixOperation::CVector *  pSrcVector,
CMatrixOperation::CVector *  pResultVector,
unsigned int  nMergeSize,
double  fFirstIndex,
int  nLBIndex 
)
static

Merge vector to sub ranks, implemented without vector class member function calls.

Parameters
pSrcVector - Vector for sharing
[out] pResultVector - Vector for saving the merge result
nMergeSize - Vector size after merging
fFirstIndex - First index of the local vector

Definition at line 392 of file MPIManager.cpp.

References FREE_MEM, GetCurrentLoadBalanceCount(), GetLoadBalanceCount(), m_mpiCommIndex, m_nCurrentRank, m_nTotalNode, m_vectDispls, m_vectRecvCount, CMatrixOperation::CVector::m_vectValueImaginaryBuffer, CMatrixOperation::CVector::m_vectValueRealBuffer, CTimeMeasurement::MeasurementEnd(), CTimeMeasurement::MeasurementStart(), CTimeMeasurement::MV_COMM, CTimeMeasurement::MV_FREE_MEM, and CTimeMeasurement::MV_MALLOC.

Referenced by CMatrixOperation::MVMulOptimal(), and CMatrixOperation::MVMulOptimal_Nooffload().

393 {
394  if( m_nTotalNode <= 3 )
395  {
397  MPI_Allgatherv(pSrcVector->m_vectValueRealBuffer.data(), GetCurrentLoadBalanceCount(nLBIndex), MPI_DOUBLE,
398  pResultVector->m_vectValueRealBuffer.data(), m_vectRecvCount[nLBIndex], m_vectDispls[nLBIndex], MPI_DOUBLE, m_mpiCommIndex);
399  MPI_Allgatherv(pSrcVector->m_vectValueImaginaryBuffer.data(), GetCurrentLoadBalanceCount(nLBIndex), MPI_DOUBLE,
400  pResultVector->m_vectValueImaginaryBuffer.data(), m_vectRecvCount[nLBIndex], m_vectDispls[nLBIndex], MPI_DOUBLE, m_mpiCommIndex);
402  }
403  else
404  {
405  double fCurrentRankPos = fFirstIndex;
406  long long fPrevRankPos = -1, fNextRankPos = -1;
407  int nPrevRank = (m_nCurrentRank-1+m_nTotalNode)%m_nTotalNode;
408  int nNextRank = (m_nCurrentRank+1)%m_nTotalNode;
409  long long nMax;
410  double *pSendBuffer = NULL, *pRecvBuffer = NULL;
411  MPI_Request req[2];
412  MPI_Status status[2];
413 
414  if( 0 == m_nCurrentRank )
415  fPrevRankPos = nMergeSize - GetLoadBalanceCount(nPrevRank, nLBIndex);
416  else
417  fPrevRankPos = fFirstIndex - GetLoadBalanceCount(nPrevRank, nLBIndex);
418 
419  if (m_nCurrentRank == m_nTotalNode - 1)
420  fNextRankPos = 0;
421  else
422  fNextRankPos = fCurrentRankPos + GetCurrentLoadBalanceCount(nLBIndex);
423 
424 #ifdef _WIN32
425  nMax = max(GetLoadBalanceCount(nPrevRank, nLBIndex), GetLoadBalanceCount(nNextRank, nLBIndex));
426 #else //_WIN32
427  nMax = std::max(GetLoadBalanceCount(nPrevRank, nLBIndex), GetLoadBalanceCount(nNextRank, nLBIndex));
428 #endif//
429 
431  pRecvBuffer = (double*)malloc(sizeof(double)*nMax*2);
432  pSendBuffer = (double*)malloc(sizeof(double)*GetLoadBalanceCount(m_nCurrentRank, nLBIndex)*2);
434 
435 
436  memcpy(pSendBuffer, pSrcVector->m_vectValueRealBuffer.data(), sizeof(double)*GetLoadBalanceCount(m_nCurrentRank, nLBIndex));
437  memcpy(pSendBuffer+ GetLoadBalanceCount(m_nCurrentRank, nLBIndex), pSrcVector->m_vectValueImaginaryBuffer.data() , sizeof(double)*GetLoadBalanceCount(m_nCurrentRank, nLBIndex));
438 
440  MPI_Irecv(pRecvBuffer, 2 * GetLoadBalanceCount(nPrevRank, nLBIndex), MPI_DOUBLE, nPrevRank, m_nCurrentRank, m_mpiCommIndex, &req[0]);
441  MPI_Isend(pSendBuffer, 2 * GetLoadBalanceCount(m_nCurrentRank, nLBIndex), MPI_DOUBLE, nNextRank, nNextRank, m_mpiCommIndex, &req[1]);
442  MPI_Waitall(2, req, status);
444 
445  memcpy(pResultVector->m_vectValueRealBuffer.data() + fPrevRankPos, pRecvBuffer, GetLoadBalanceCount(nPrevRank, nLBIndex) * sizeof(double));
446  memcpy(pResultVector->m_vectValueImaginaryBuffer.data() + fPrevRankPos, pRecvBuffer + GetLoadBalanceCount(nPrevRank, nLBIndex), GetLoadBalanceCount(nPrevRank, nLBIndex) * sizeof(double));
447 
449  MPI_Irecv(pRecvBuffer, 2 * GetLoadBalanceCount(nNextRank, nLBIndex), MPI_DOUBLE, nNextRank, m_nCurrentRank, m_mpiCommIndex, &req[0]);
450  MPI_Isend(pSendBuffer, 2 * GetLoadBalanceCount(m_nCurrentRank, nLBIndex), MPI_DOUBLE, nPrevRank, nPrevRank, m_mpiCommIndex, &req[1]);
451  MPI_Waitall(2, req, status);
453  memcpy(pResultVector->m_vectValueRealBuffer.data() + fNextRankPos, pRecvBuffer, GetLoadBalanceCount(nNextRank, nLBIndex) * sizeof(double));
454  memcpy(pResultVector->m_vectValueImaginaryBuffer.data() + fNextRankPos, pRecvBuffer + GetLoadBalanceCount(nNextRank, nLBIndex), GetLoadBalanceCount(nNextRank, nLBIndex) * sizeof(double));
455 
457  FREE_MEM(pRecvBuffer);
458  FREE_MEM(pSendBuffer);
460 
461  memcpy(pResultVector->m_vectValueRealBuffer.data() + (long long)fFirstIndex, pSrcVector->m_vectValueRealBuffer.data(), GetLoadBalanceCount(m_nCurrentRank, nLBIndex) * sizeof(double));
462  memcpy(pResultVector->m_vectValueImaginaryBuffer.data() + (long long)fFirstIndex, pSrcVector->m_vectValueImaginaryBuffer.data(), GetLoadBalanceCount(m_nCurrentRank, nLBIndex) * sizeof(double));
463  }
464 }

Here is the call graph for this function:

Here is the caller graph for this function:
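For small node counts (m_nTotalNode <= 3) the listing above falls back to MPI_Allgatherv: every rank contributes its load-balanced block and receives the fully merged vector. The following is a minimal sketch of that collective path; the recvCounts/displs arrays stand in for m_vectRecvCount / m_vectDispls and are built here purely for illustration.

#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0, size = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int globalSize = 10;                       // merged vector length (illustrative)
    std::vector<int> recvCounts(size), displs(size);
    for (int r = 0; r < size; ++r)                   // simple block distribution
        recvCounts[r] = globalSize / size + (r < globalSize % size ? 1 : 0);
    displs[0] = 0;
    for (int r = 1; r < size; ++r)
        displs[r] = displs[r - 1] + recvCounts[r - 1];

    std::vector<double> localBlock(recvCounts[rank], static_cast<double>(rank));
    std::vector<double> merged(globalSize, 0.0);

    // Each rank sends its block; every rank ends up with the whole vector.
    MPI_Allgatherv(localBlock.data(), recvCounts[rank], MPI_DOUBLE,
                   merged.data(), recvCounts.data(), displs.data(), MPI_DOUBLE,
                   MPI_COMM_WORLD);

    MPI_Finalize();
    return 0;
}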

void CMPIManager::ReceiveDoubleBufferSync ( int  nSourceRank,
double *  pBuffer,
int  nSize,
MPI_Request *  req,
MPI_Comm  commWorld = MPI_COMM_NULL 
)
static

Receive a double data array buffer synchronously.

Parameters
nSourceRank - Source rank index
pBuffer - Data buffer to receive into
nSize - Data buffer size
req - MPI request parameter

Definition at line 712 of file MPIManager.cpp.

References CTimeMeasurement::COMM, GetCurrentRank(), m_mpiCommIndex, CTimeMeasurement::MeasurementEnd(), and CTimeMeasurement::MeasurementStart().

Referenced by CGeometricShape::BuildPEBiasVector(), CGeometricShape::BuildPEWaveVector(), CGeometricShape::ConstructContactRegionOnPoissonGrid(), CGeometricShape::ConstructMapInfo(), CUtility::DumpCSR(), CGeometricShape::ExchangeAtomInfoBetweenNode(), CGeometricShape::MapElecAtomOnPoissonGrid(), and ReceiveVectorSync().

713 {
714  MPI_Status status;
716  if( MPI_COMM_NULL == commWorld)
717  MPI_Recv(pBuffer, nSize, MPI_DOUBLE, nSourceRank, CMPIManager::GetCurrentRank(), m_mpiCommIndex, &status);
718  else
719  MPI_Recv(pBuffer, nSize, MPI_DOUBLE, nSourceRank, CMPIManager::GetCurrentRank(commWorld), commWorld, &status);
721 }

Here is the call graph for this function:

Here is the caller graph for this function:
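As the listing shows, the receiver matches on its own rank as the message tag, so the matching send must tag the message with the destination rank. A small stand-alone sketch of that convention in plain MPI follows; the ranks, sizes, and values are illustrative only.

#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    const int n = 4;
    std::vector<double> buf(n, 0.0);

    if (rank == 0) {
        // Send to rank 1, tagging the message with the destination rank (1).
        for (int i = 0; i < n; ++i) buf[i] = i;
        MPI_Send(buf.data(), n, MPI_DOUBLE, /*dest=*/1, /*tag=*/1, MPI_COMM_WORLD);
    } else if (rank == 1) {
        // Receive from rank 0, matching on our own rank (1) as the tag.
        MPI_Status status;
        MPI_Recv(buf.data(), n, MPI_DOUBLE, /*source=*/0, /*tag=*/1, MPI_COMM_WORLD, &status);
    }

    MPI_Finalize();
    return 0;
}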

void CMPIManager::ReceiveVectorSync ( int  nSourceRank,
CMatrixOperation::CVector *  pVector,
int  nSize,
MPI_Request *  req,
MPI_Comm  commWorld = MPI_COMM_NULL 
)
static

Receiving Vector with sync.

Parameters
nSourceRank - Source rank for receiving data
pVector - Receiving buffer
nSize - Receiving size
req - MPI_Request for MPI_Recv
commWorld - MPI_Comm for receiving data

Definition at line 858 of file MPIManager.cpp.

References FREE_MEM, ReceiveDoubleBufferSync(), and CMatrixOperation::CVector::Serialize().

Referenced by CLanczosMethod::MergeDegeneratedEigenvalues().

859 {
860  double *pBuffer = NULL;
861 
862  pBuffer = (double*)malloc(sizeof(double)*nSize*2);
863 
864  ReceiveDoubleBufferSync(nSourceRank, pBuffer, nSize * 2, req, commWorld);
865 
866  pVector->Serialize(pBuffer, true);
867 
868  FREE_MEM(pBuffer);
869 }

Here is the call graph for this function:

Here is the caller graph for this function:

void CMPIManager::SendDoubleBufferSync ( int  nTargetRank,
double *  pBuffer,
int  nSize,
MPI_Request *  req,
MPI_Comm  commWorld = MPI_COMM_NULL 
)
static

Send a double data array buffer synchronously.

Parameters
nTargetRank - Target rank index
pBuffer - Data buffer to send
nSize - Data buffer size
req - MPI request parameter

Definition at line 685 of file MPIManager.cpp.

References CTimeMeasurement::COMM, GetCurrentRank(), m_mpiCommIndex, CTimeMeasurement::MeasurementEnd(), and CTimeMeasurement::MeasurementStart().

Referenced by CGeometricShape::BuildPEBiasVector(), CGeometricShape::BuildPEWaveVector(), CGeometricShape::ConstructContactRegionOnPoissonGrid(), CGeometricShape::ConstructMapInfo(), CUtility::DumpCSR(), CGeometricShape::ExchangeAtomInfoBetweenNode(), CGeometricShape::MapElecAtomOnPoissonGrid(), and SendVectorSync().

686 {
687  int nRank = CMPIManager::GetCurrentRank();
689  if( MPI_COMM_NULL == commWorld)
690  MPI_Send(pBuffer, nSize, MPI_DOUBLE, nTargetRank, nTargetRank, m_mpiCommIndex);
691  else
692  MPI_Send(pBuffer, nSize, MPI_DOUBLE, nTargetRank, nTargetRank, commWorld);
694 }

Here is the call graph for this function:

Here is the caller graph for this function:
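A sketch of how a caller might pair the two synchronous helpers, with rank 0 sending and rank 1 receiving. The buffer contents are illustrative, and the MPI_Request argument is simply passed through, since the synchronous variants in the listings above complete before returning; callers such as CGeometricShape::ExchangeAtomInfoBetweenNode use the same pairing with application data.

#include <mpi.h>
#include <vector>
#include "MPIManager.h"   // CMPIManager

void ExchangeExample()
{
    const int n = 16;
    std::vector<double> buffer(n, 0.0);
    MPI_Request req = MPI_REQUEST_NULL;   // not used by the synchronous variants

    if (CMPIManager::GetCurrentRank() == 0) {
        for (int i = 0; i < n; ++i) buffer[i] = i * 0.5;   // illustrative payload
        CMPIManager::SendDoubleBufferSync(/*nTargetRank=*/1, buffer.data(), n, &req);
    } else if (CMPIManager::GetCurrentRank() == 1) {
        CMPIManager::ReceiveDoubleBufferSync(/*nSourceRank=*/0, buffer.data(), n, &req);
    }
}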

void CMPIManager::SendVectorSync ( int  nTargetRank,
CMatrixOperation::CVector *  pVector,
int  nSize,
MPI_Request *  req,
MPI_Comm  commWorld = MPI_COMM_NULL 
)
static

Sending Vector with sync.

Parameters
nTargetRank - Sending target rank
pVector - Sending buffer
nSize - Sending size
req - MPI_Request for MPI_Send
commWorld - MPI_Comm for sending data

Definition at line 838 of file MPIManager.cpp.

References FREE_MEM, SendDoubleBufferSync(), and CMatrixOperation::CVector::Serialize().

Referenced by CLanczosMethod::MergeDegeneratedEigenvalues().

839 {
840  double *pBuffer = NULL;
841 
842  pBuffer = (double*)malloc(sizeof(double)*nSize*2);
843  pVector->Serialize(pBuffer, false);
844 
845  SendDoubleBufferSync(nTargetRank, pBuffer, nSize * 2, req, commWorld);
846 
847 
848  FREE_MEM(pBuffer);
849 }

Here is the call graph for this function:

Here is the caller graph for this function:
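Both SendVectorSync and ReceiveVectorSync move a complex vector of length nSize as a flat buffer of 2*nSize doubles produced by CVector::Serialize. The sketch below shows the equivalent packing in plain MPI; the "real parts first, imaginary parts second" layout is an assumption inferred from the MergeVectorOptimal listing, not a documented guarantee of Serialize.

#include <mpi.h>
#include <vector>
#include <algorithm>

void SendComplexVector(const std::vector<double>& re,
                       const std::vector<double>& im,
                       int targetRank, MPI_Comm comm)
{
    const int n = static_cast<int>(re.size());
    std::vector<double> packed(2 * n);
    std::copy(re.begin(), re.end(), packed.begin());       // real parts first (assumed layout)
    std::copy(im.begin(), im.end(), packed.begin() + n);   // imaginary parts second

    // Tag with the destination rank, matching the manager's convention.
    MPI_Send(packed.data(), 2 * n, MPI_DOUBLE, targetRank, targetRank, comm);
}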

void CMPIManager::SetMPIEnviroment ( int  nRank,
int  nTotalNode 
)
static

Set MPI Environment.

Parameters
nRank - Current rank index
nTotalNode - Total rank count

Definition at line 141 of file MPIManager.cpp.

References m_bStartMPI, m_nCurrentRank, and m_nTotalNode.

Referenced by InitLevel(), InitMPIEnv(), and CLanczosLaunching::LaunchingLanczos().

142 {
143  m_nCurrentRank = nRank;
144  m_nTotalNode = nTotalNode;
145  m_bStartMPI = true;
146 }

Here is the caller graph for this function:
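A minimal sketch of the usual bootstrap before calling SetMPIEnviroment: query the rank and size from MPI and hand them to the manager. In the project this is done by InitMPIEnv and InitLevel; the main() below is illustrative only.

#include <mpi.h>
#include "MPIManager.h"   // CMPIManager

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0, totalNodes = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &totalNodes);

    // Record rank/size and mark MPI as started inside the manager.
    CMPIManager::SetMPIEnviroment(rank, totalNodes);

    // ... run the solver ...

    MPI_Finalize();
    return 0;
}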

void CMPIManager::SetPhiTid ( int *  tid)
static

Definition at line 894 of file MPIManager.cpp.

References GetCurrentRank().

Referenced by CSPLoop::executeSPLoop().

895 {
896 #ifndef DISABLE_MPI_ROUTINE
897  tid[0] = GetCurrentRank() % 2;
898 // FIXME test code
899 #ifndef _WIN32
900  char host_name[256];
901  gethostname(host_name, 256);
902  printf("-[Host:%s] (MPI:%03d) mapped to (MIC:%03d)\n", host_name, GetCurrentRank(), tid[0]);
903 #endif //_WIN32
904 #else
905  tid[0] = 0;
906 #endif
907 }

Here is the call graph for this function:

Here is the caller graph for this function:
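The listing maps each MPI rank to a coprocessor id with a simple modulo (rank % 2, i.e. two devices per host is assumed). A small sketch of that round-robin mapping, with the device count made a parameter purely for illustration:

#include <mpi.h>
#include <cstdio>

// Round-robin assignment of an MPI rank to a device id (illustrative helper).
int MapRankToDevice(int rank, int devicesPerHost)
{
    return rank % devicesPerHost;
}

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    printf("MPI rank %d -> device %d\n", rank, MapRankToDevice(rank, 2));
    MPI_Finalize();
    return 0;
}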

void CMPIManager::WaitReceiveDoubleBufferAsync ( MPI_Request *  req)
static

Wait for a pending asynchronous double buffer receive to complete.

Parameters
req - MPI request parameter

Definition at line 726 of file MPIManager.cpp.

References m_ReceiveDoubleAsyncRequest.

727 {
728  MPI_Status status;
729 
730  MPI_Wait(&m_ReceiveDoubleAsyncRequest, &status);
731 }
void CMPIManager::WaitSendDoubleBufferSync ( MPI_Request *  req)
static

Wait for a pending double buffer send to complete.

Parameters
req - MPI request parameter

Definition at line 699 of file MPIManager.cpp.

References m_SendDoubleAsyncRequest.

700 {
701  MPI_Status status;
702 
703  MPI_Wait(&m_SendDoubleAsyncRequest, &status);
704 }

Member Data Documentation

bool CMPIManager::m_bMultiLevel = false
staticprivate

Flag for Multilevel MPI group.

Definition at line 105 of file MPIManager.h.

Referenced by InitLevel(), and IsMultiLevelMPI().

bool CMPIManager::m_bNeedPostOperation = { false, false, false, false, false, false, false, false, false, false }
staticprivate

MPI Level.

Definition at line 99 of file MPIManager.h.

Referenced by InitLevel().

bool CMPIManager::m_bStartMPI = false
staticprivate

Whether MPI_Init has been called or not.

Definition at line 88 of file MPIManager.h.

Referenced by FinalizeManager(), IsInMPIRoutine(), and SetMPIEnviroment().

MPI_Comm CMPIManager::m_deflationComm = MPI_COMM_NULL
staticprivate
MPI_Group CMPIManager::m_deflationGroup = MPI_GROUP_EMPTY
staticprivate

MPI Group for Deflation computation.

Definition at line 103 of file MPIManager.h.

Referenced by FinalizeManager(), and InitLevel().

MPI_Group CMPIManager::m_lanczosGroup = MPI_GROUP_EMPTY
staticprivate

MPI Group for Lanczos computation.

Definition at line 102 of file MPIManager.h.

Referenced by FinalizeManager(), and InitLevel().

int CMPIManager::m_nCommWorldRank = 0
staticprivate

MPI Rank before split.

Definition at line 86 of file MPIManager.h.

Referenced by InitLevel().

int CMPIManager::m_nCurrentRank = 0
staticprivate
unsigned int CMPIManager::m_nLanczosGroupIndex = 0
staticprivate

MPI Group index for Lanczos group.

Definition at line 104 of file MPIManager.h.

Referenced by GetLanczosGroupIndex(), and InitLevel().

int CMPIManager::m_nLBCount = 0
staticprivate

Definition at line 106 of file MPIManager.h.

Referenced by FinalizeManager(), InitCommunicationBufferMetric(), and LoadBlancing().

unsigned int CMPIManager::m_nMPILevel = 1
staticprivate

MPI Level.

Definition at line 98 of file MPIManager.h.

int CMPIManager::m_nTotalNode = 1
staticprivate
int * CMPIManager::m_pBankInfo = NULL
staticprivate

Bank information after MPI split.

Definition at line 94 of file MPIManager.h.

Referenced by FinalizeManager().

CMPIManager::LPCOMPLEX_NUMBER CMPIManager::m_pCommBuffer = NULL
staticprivate

Data buffer for MPI Communication.

Definition at line 90 of file MPIManager.h.

CMPIManager::LPCOMPLEX_NUMBER CMPIManager::m_pConvertingBuffer = NULL
staticprivate

Data buffer for Vector converting.

Definition at line 91 of file MPIManager.h.

MPI_Request CMPIManager::m_ReceiveDoubleAsyncRequest = MPI_REQUEST_NULL
staticprivate

Request for receiving double.

Definition at line 97 of file MPIManager.h.

Referenced by WaitReceiveDoubleBufferAsync().

MPI_Request CMPIManager::m_SendDoubleAsyncRequest = MPI_REQUEST_NULL
staticprivate

Request for sending double.

Definition at line 96 of file MPIManager.h.

Referenced by WaitSendDoubleBufferSync().

std::vector< int * > CMPIManager::m_vectDispls
staticprivate

Displacements for MPI communication.

Definition at line 95 of file MPIManager.h.

Referenced by FinalizeManager(), InitCommunicationBufferMetric(), MergeVector(), MergeVectorEx_Optimal(), and MergeVectorOptimal().

std::vector< int * > CMPIManager::m_vectLoadBalance
staticprivate

Load balancing for MPI communication.

Definition at line 89 of file MPIManager.h.

Referenced by FinalizeManager(), GetCurrentLoadBalanceCount(), GetLoadBalanceCount(), and LoadBlancing().

std::vector< int * > CMPIManager::m_vectRecvCount
staticprivate

Receiving count variable for MPI communication.

Definition at line 92 of file MPIManager.h.

Referenced by FinalizeManager(), InitCommunicationBufferMetric(), MergeVector(), MergeVectorEx_Optimal(), and MergeVectorOptimal().

std::vector< int * > CMPIManager::m_vectSendCount
staticprivate

Sending count variable for MPI communication.

Definition at line 93 of file MPIManager.h.

Referenced by FinalizeManager(), and InitCommunicationBufferMetric().


The documentation for this class was generated from the following files: