Coverage for pySDC/helpers/blocks.py: 0%
68 statements
import numpy as np


class BlockDecomposition(object):
5 """
6 Class decomposing a cartesian space domain (1D to 3D) into a given number of processors.
8 Parameters
9 ----------
10 nProcs : int
11 Total number of processors for space block decomposition.
12 gridSizes : list[int]
13 Number of grid points in each dimension
14 algo : str, optional
15 Algorithm used for the block decomposition :
17 - Hybrid : approach minimizing interface communication, inspired from
18 the `[Hybrid CFD solver] <https://web.stanford.edu/group/ctr/ResBriefs07/5_larsson1_pp47_58.pdf>`_.
19 - ChatGPT : quickly generated using `[ChatGPT] <https://chatgpt.com>`_.
20 The default is "Hybrid".
21 gRank : int, optional
22 If provided, the global rank that will determine the local block distribution. Default is None.
23 order : str, optional
24 The order used when computing the rank block distribution. Default is `C`.
25 """

    def __init__(self, nProcs, gridSizes, algo="Hybrid", gRank=None, order="C"):
        dim = len(gridSizes)
        assert dim in [1, 2, 3], "block decomposition only works for 1D, 2D or 3D domains"

        if algo == "ChatGPT":
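            # Factorize nProcs into primes, spreading the factors over the
            # dimensions, then pad or merge the factor list until it has
            # exactly `dim` entries.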
            nBlocks = [1] * dim
            for i in range(2, int(nProcs**0.5) + 1):
                while nProcs % i == 0:
                    nBlocks[0] *= i
                    nProcs //= i
                    nBlocks.sort()

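            # Whatever remains after trial division up to sqrt(nProcs) is prime;
            # fold it into the smallest block count.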
            if nProcs > 1:
                nBlocks[0] *= nProcs
                nBlocks.sort()

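            # Fewer factors than dimensions: pad the list with 1s, keeping it sorted.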
            while len(nBlocks) < dim:
                smallest = nBlocks.pop(0)
                nBlocks += [1, smallest]
                nBlocks.sort()

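            # More factors than dimensions: merge the two smallest block counts.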
            while len(nBlocks) > dim:
                smallest = nBlocks.pop(0)
                next_smallest = nBlocks.pop(0)
                nBlocks.append(smallest * next_smallest)
                nBlocks.sort()

        elif algo == "Hybrid":
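            # Decompose nProcs into powers of small prime factors (2, then 3),
            # plus whatever factor is left over.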
            rest = nProcs
            facs = {
                1: [1],
                2: [2, 1],
                3: [2, 3, 1],
            }[dim]
            exps = [0] * dim
            for n in range(dim - 1):
                while (rest % facs[n]) == 0:
                    exps[n] = exps[n] + 1
                    rest = rest // facs[n]
            if rest > 1:
                facs[dim - 1] = rest
                exps[dim - 1] = 1

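            # Greedily assign each factor to the dimension whose local blocks
            # currently hold the most grid points, which balances block sizes
            # and keeps interfaces small.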
            nBlocks = [1] * dim
            for n in range(dim - 1, -1, -1):
                while exps[n] > 0:
                    dummymax = -1
                    dmax = 0
                    for d, nPts in enumerate(gridSizes):
                        dummy = (nPts + nBlocks[d] - 1) // nBlocks[d]
                        if dummy >= dummymax:
                            dummymax = dummy
                            dmax = d
                    nBlocks[dmax] = nBlocks[dmax] * facs[n]
                    exps[n] = exps[n] - 1

        else:
            raise NotImplementedError(f"algo={algo}")

        # Store attributes
        self.dim = dim
        self.nBlocks = nBlocks
        self.gridSizes = gridSizes

        # Used for rank block distribution
        self.gRank = gRank
        self.order = order

    @property
    def ranks(self):
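        # Block coordinates (one index per dimension) of the global rank gRank
        # in the cartesian grid of blocks.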
        assert self.gRank is not None, "gRank attribute needs to be set"
        cart = np.arange(np.prod(self.nBlocks)).reshape(self.nBlocks, order=self.order)
        return list(np.argwhere(cart == self.gRank)[0])

    @property
    def localBounds(self):
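        # For each dimension, compute the first global index (iLoc) and the
        # number of local points (nLoc) owned by this rank; when nPoints is not
        # divisible by nBlocks, the first nRest blocks each get one extra point.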
        iLocList, nLocList = [], []
        for rank, nPoints, nBlocks in zip(self.ranks, self.gridSizes, self.nBlocks):
            n0 = nPoints // nBlocks
            nRest = nPoints - nBlocks * n0
            nLoc = n0 + 1 * (rank < nRest)
            iLoc = rank * n0 + nRest * (rank >= nRest) + rank * (rank < nRest)

            iLocList.append(iLoc)
            nLocList.append(nLoc)
        return iLocList, nLocList
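

# Rough sketch of what the decomposition should produce for a small sequential
# example (expected values, not taken from the original module): with nProcs=4
# on a 256 x 64 grid, the "Hybrid" algorithm should split only the first
# dimension, e.g.
#
#   blocks = BlockDecomposition(4, [256, 64], gRank=1)
#   blocks.nBlocks       # expected: [4, 1]
#   blocks.ranks         # expected: [1, 0]
#   blocks.localBounds   # expected: ([64, 0], [64, 64])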


if __name__ == "__main__":
    # Base usage of this module for a 2D decomposition
    from mpi4py import MPI
    from time import sleep

    comm: MPI.Intracomm = MPI.COMM_WORLD
    MPI_SIZE = comm.Get_size()
    MPI_RANK = comm.Get_rank()

    blocks = BlockDecomposition(MPI_SIZE, [256, 64], gRank=MPI_RANK)
    if MPI_RANK == 0:
        print(f"nBlocks : {blocks.nBlocks}")

    ranks = blocks.ranks
    bounds = blocks.localBounds

    comm.Barrier()
    sleep(0.01 * MPI_RANK)
    print(f"[Rank {MPI_RANK}] pRankX={ranks}, bounds={bounds}")
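
    # To try this demo (assuming a working MPI installation and mpi4py),
    # something along the lines of:
    #   mpiexec -n 4 python pySDC/helpers/blocks.py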