Coverage for pySDC/implementations/problem_classes/generic_spectral.py: 59%

198 statements  

from pySDC.core.problem import Problem, WorkCounter
from pySDC.helpers.spectral_helper import SpectralHelper
import numpy as np
from pySDC.core.errors import ParameterError
from pySDC.helpers.fieldsIO import Rectilinear


class GenericSpectralLinear(Problem):
    """
    Generic class to solve problems of the form M u_t + L u = y, with mass matrix M, linear operator L and some right
    hand side y using spectral methods.
    L may contain algebraic conditions, as long as (M + dt L) is invertible.

    Note that the `__getattr__` method is overloaded to pass requests on to the spectral helper if they are not
    attributes of this class itself. For instance, you can add a BC by calling `self.spectral.add_BC` or equivalently
    `self.add_BC`.

    You can port problems derived from this more or less seamlessly to GPU by using the numerical libraries that are
    class attributes of the spectral helper. This class will automatically switch the datatypes using the `setup_GPU` method.

    Attributes:
        spectral (pySDC.helpers.spectral_helper.SpectralHelper): Spectral helper
        work_counters (dict): Dictionary for counting work
        cached_factorizations (dict): Dictionary of cached matrix factorizations for solving
        L (sparse matrix): Linear operator
        M (sparse matrix): Mass matrix
        diff_mask (list): Mask for separating differential and algebraic terms
        Pl (sparse matrix): Left preconditioner
        Pr (sparse matrix): Right preconditioner
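
    Example:
        A minimal sketch of a derived problem (a one-dimensional heat equation in first order formulation). The keys
        of the basis dictionaries are assumptions here and have to match what ``SpectralHelper.add_axis`` expects;
        boundary conditions via ``self.add_BC`` are omitted:

        ```
        class Heat1D(GenericSpectralLinear):
            def __init__(self, nu=1.0, **kwargs):
                bases = [{'base': 'ultraspherical', 'N': 64}]
                super().__init__(bases=bases, components=['u', 'ux'], **kwargs)

                Dx = self.get_differentiation_matrix(axes=(0,))
                I = self.get_Id()

                # M u_t + L u = 0 with u_t - nu (ux)_x = 0 and the algebraic condition ux - u_x = 0
                self.setup_L({'u': {'ux': -nu * Dx}, 'ux': {'u': Dx, 'ux': -I}})
                self.setup_M({'u': {'u': I}})
        ```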

30 """ 

31 

32 def setup_GPU(self): 

33 """switch to GPU modules""" 

34 import cupy as cp 

35 from pySDC.implementations.datatype_classes.cupy_mesh import cupy_mesh, imex_cupy_mesh 

36 from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh 

37 

38 self.dtype_u = cupy_mesh 

39 

40 GPU_versions = { 

41 mesh: cupy_mesh, 

42 imex_mesh: imex_cupy_mesh, 

43 } 

44 

45 self.dtype_f = GPU_versions[self.dtype_f] 

46 

47 if self.comm is not None: 

48 from pySDC.helpers.NCCL_communicator import NCCLComm 

49 

50 if not isinstance(self.comm, NCCLComm): 

51 self.__dict__['comm'] = NCCLComm(self.comm) 

52 

    def __init__(
        self,
        bases,
        components,
        comm=None,
        Dirichlet_recombination=True,
        left_preconditioner=True,
        solver_type='cached_direct',
        solver_args=None,
        preconditioner_args=None,
        useGPU=False,
        max_cached_factorizations=12,
        spectral_space=True,
        real_spectral_coefficients=False,
        debug=False,
    ):
        """
        Base class for problems discretized with spectral methods.

        Args:
            bases (list of dictionaries): 1D Bases
            components (list of strings): Components of the equations
            comm (mpi4py.Intracomm or None): MPI communicator
            Dirichlet_recombination (bool): Use Dirichlet recombination in the last axis as right preconditioner
            left_preconditioner (bool): Reverse the Kronecker product if yes
            solver_type (str): Solver for linear systems
            solver_args (dict): Arguments for the linear solver
            preconditioner_args (dict): Arguments for the incomplete LU factorization used as preconditioner with iterative solvers
            useGPU (bool): Run on GPU or CPU
            max_cached_factorizations (int): Number of matrix factorizations to cache before starting eviction
            spectral_space (bool): If yes, the solution is not transformed back after solving and evaluating the RHS, and is expected in spectral space as input to these functions
            real_spectral_coefficients (bool): If yes, allow only real values in spectral space, otherwise allow complex values
            debug (bool): Perform additional tests at extra computational cost
        """
        solver_args = {} if solver_args is None else solver_args

        preconditioner_args = {} if preconditioner_args is None else preconditioner_args
        preconditioner_args['drop_tol'] = preconditioner_args.get('drop_tol', 1e-3)
        preconditioner_args['fill_factor'] = preconditioner_args.get('fill_factor', 100)

        self._makeAttributeAndRegister(
            'max_cached_factorizations',
            'useGPU',
            'solver_type',
            'solver_args',
            'preconditioner_args',
            'left_preconditioner',
            'Dirichlet_recombination',
            'comm',
            'spectral_space',
            'real_spectral_coefficients',
            'debug',
            localVars=locals(),
        )
        self.spectral = SpectralHelper(comm=comm, useGPU=useGPU, debug=debug)

        if useGPU:
            self.setup_GPU()
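            # cupy's sparse iterative solvers expect `tol` rather than SciPy's newer `rtol`, hence the rename below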

            if self.solver_args is not None:
                if 'rtol' in self.solver_args.keys():
                    self.solver_args['tol'] = self.solver_args.pop('rtol')

        for base in bases:
            self.spectral.add_axis(**base)
        self.spectral.add_component(components)

        self.spectral.setup_fft(real_spectral_coefficients)

        super().__init__(init=self.spectral.init_forward if spectral_space else self.spectral.init)

        self.work_counters[solver_type] = WorkCounter()
        self.work_counters['factorizations'] = WorkCounter()

        self.setup_preconditioner(Dirichlet_recombination, left_preconditioner)

        self.cached_factorizations = {}

    def __getattr__(self, name):
        """
        Pass requests on to the helper if they are not directly attributes of this class, for convenience.

        Args:
            name (str): Name of the attribute you want

        Returns:
            request
        """
        return getattr(self.spectral, name)

    def _setup_operator(self, LHS):
        """
        Setup a sparse linear operator by adding relationships. See documentation for ``GenericSpectralLinear.setup_L`` to learn more.

        Args:
            LHS (dict): Equations to be added to the operator

        Returns:
            sparse linear operator
        """
        operator = self.spectral.get_empty_operator_matrix()
        for line, equation in LHS.items():
            self.spectral.add_equation_lhs(operator, line, equation)
        return self.spectral.convert_operator_matrix_to_operator(operator)

    def setup_L(self, LHS):
        """
        Setup the left hand side of the linear operator L and store it in ``self.L``.

        The argument is meant to be a dictionary with the line you want to write the equation in as the key and the
        relationship between components as another dictionary. For instance, you can add an algebraic condition
        capturing a first derivative relationship between u and ux as follows:

        ```
        Dx = self.get_differentiation_matrix(axes=(0,))
        I = self.get_Id()
        LHS = {'ux': {'u': Dx, 'ux': -I}}
        self.setup_L(LHS)
        ```

        If you put zero as right hand side for the solver in the line for ux, ux will contain the x-derivative of u
        afterwards.

        Args:
            LHS (dict): Dictionary containing the equations.
        """
        self.L = self._setup_operator(LHS)

    def setup_M(self, LHS):
        '''
        Setup mass matrix, see documentation of ``GenericSpectralLinear.setup_L``.
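
        For instance, in a problem with components ``u`` and ``ux`` where only ``u`` carries a time derivative, the
        mass matrix is set up as ``self.setup_M({'u': {'u': I}})``. The keys passed here also define ``diff_mask``,
        which separates differential from algebraic components.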

        '''
        diff_index = list(LHS.keys())
        self.diff_mask = [me in diff_index for me in self.components]
        self.M = self._setup_operator(LHS)

    def setup_preconditioner(self, Dirichlet_recombination=True, left_preconditioner=True):
        """
        Get left and right preconditioners.

        Args:
            Dirichlet_recombination (bool): Use a basis conversion as right preconditioner. Strongly recommended for Chebychev and Ultraspherical methods.
            left_preconditioner (bool): If True, it will interleave the variables and reverse the Kronecker product
        """
        sp = self.spectral.sparse_lib
        N = np.prod(self.init[0][1:])

        Id = sp.eye(N)
        Pl_lhs = {comp: {comp: Id} for comp in self.components}
        self.Pl = self._setup_operator(Pl_lhs)

        if left_preconditioner:
            # reverse Kronecker product
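            # (R below is a permutation matrix: the entry for component j of spatial degree of freedom i is moved
            # from position j * N + i to position i * ncomponents + j, so all components belonging to one degree of
            # freedom end up next to each other)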

            if self.spectral.useGPU:
                R = self.Pl.get().tolil() * 0
            else:
                R = self.Pl.tolil() * 0

            for j in range(self.ncomponents):
                for i in range(N):
                    R[i * self.ncomponents + j, j * N + i] = 1.0

            self.Pl = self.spectral.sparse_lib.csc_matrix(R)

        if Dirichlet_recombination and type(self.axes[-1]).__name__ in ['ChebychevHelper', 'UltrasphericalHelper']:
            _Pr = self.spectral.get_Dirichlet_recombination_matrix(axis=-1)
        else:
            _Pr = Id

        Pr_lhs = {comp: {comp: _Pr} for comp in self.components}
        self.Pr = self._setup_operator(Pr_lhs) @ self.Pl.T

    def solve_system(self, rhs, dt, u0=None, *args, skip_itransform=False, **kwargs):
        """
        Do an implicit Euler step to solve M u_t + L u = rhs, with M the mass matrix and L the linear operator as set
        up by ``GenericSpectralLinear.setup_L`` and ``GenericSpectralLinear.setup_M``.

        The implicit Euler step amounts to solving (M + dt L) u = M rhs. Note that M need not be invertible as long
        as (M + dt L) is. This means solving with dt=0 to mimic explicit methods does not work for all problems, in
        particular simple DAEs.

        Note that by putting M rhs on the right hand side, this function can only solve algebraic conditions equal to
        zero. If you want something else, it should be easy to overload this function.
        """

        sp = self.spectral.sparse_lib

        if self.spectral_space:
            rhs_hat = rhs.copy()
            if u0 is not None:
                u0_hat = u0.copy().flatten()
            else:
                u0_hat = None
        else:
            rhs_hat = self.spectral.transform(rhs)
            if u0 is not None:
                u0_hat = self.spectral.transform(u0).flatten()
            else:
                u0_hat = None

        # apply the inverse right preconditioner to the initial guess
        if u0_hat is not None and 'direct' not in self.solver_type:
            if not hasattr(self, '_Pr_inv'):
                self._Pr_inv = self.linalg.splu(self.Pr.astype(complex)).solve
            u0_hat[...] = self._Pr_inv(u0_hat)

        rhs_hat = (self.M @ rhs_hat.flatten()).reshape(rhs_hat.shape)
        rhs_hat = self.spectral.put_BCs_in_rhs_hat(rhs_hat)
        rhs_hat = self.Pl @ rhs_hat.flatten()

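        # The linear system assembled and solved below is the left/right preconditioned implicit Euler system
        #     Pl (M + dt L) Pr y = Pl M rhs,
        # with the boundary conditions inserted into both the matrix and the right hand side; the solution is
        # recovered as u = Pr y at the end.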

        if dt not in self.cached_factorizations.keys() or not self.solver_type.lower() == 'cached_direct':
            A = self.M + dt * self.L
            A = self.Pl @ self.spectral.put_BCs_in_matrix(A) @ self.Pr

        if 'ilu' in self.solver_type.lower():
            if dt not in self.cached_factorizations.keys():
                if len(self.cached_factorizations) >= self.max_cached_factorizations:
                    to_evict = list(self.cached_factorizations.keys())[0]
                    self.cached_factorizations.pop(to_evict)
                    self.logger.debug(f'Evicted matrix factorization for {to_evict=:.6f} from cache')
                iLU = self.linalg.spilu(
                    A, **{**self.preconditioner_args, 'drop_tol': dt * self.preconditioner_args['drop_tol']}
                )
                self.cached_factorizations[dt] = self.linalg.LinearOperator(A.shape, iLU.solve)
                self.logger.debug(f'Cached incomplete LU factorization for {dt=:.6f}')
                self.work_counters['factorizations']()
            M = self.cached_factorizations[dt]
        else:
            M = None

        info = 0

        if self.solver_type.lower() == 'cached_direct':
            if dt not in self.cached_factorizations.keys():
                if len(self.cached_factorizations) >= self.max_cached_factorizations:
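                    # the first key in the cache is the oldest factorization (dicts preserve insertion order), so it is evicted below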

                    self.cached_factorizations.pop(list(self.cached_factorizations.keys())[0])
                    self.logger.debug(f'Evicted matrix factorization for {dt=:.6f} from cache')
                self.cached_factorizations[dt] = self.spectral.linalg.factorized(A)
                self.logger.debug(f'Cached matrix factorization for {dt=:.6f}')
                self.work_counters['factorizations']()

            _sol_hat = self.cached_factorizations[dt](rhs_hat)
            self.logger.debug(f'Used cached matrix factorization for {dt=:.6f}')

        elif self.solver_type.lower() == 'direct':
            _sol_hat = sp.linalg.spsolve(A, rhs_hat)
        elif 'gmres' in self.solver_type.lower():
            _sol_hat, _ = sp.linalg.gmres(
                A,
                rhs_hat,
                x0=u0_hat,
                **self.solver_args,
                callback=self.work_counters[self.solver_type],
                callback_type='pr_norm',
                M=M,
            )
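            # note that gmres discards its convergence flag here, so the warning below only covers cg and bicgstab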

        elif self.solver_type.lower() == 'cg':
            _sol_hat, info = sp.linalg.cg(
                A, rhs_hat, x0=u0_hat, **self.solver_args, callback=self.work_counters[self.solver_type]
            )
        elif 'bicgstab' in self.solver_type.lower():
            _sol_hat, info = self.linalg.bicgstab(
                A,
                rhs_hat,
                x0=u0_hat,
                **self.solver_args,
                callback=self.work_counters[self.solver_type],
                M=M,
            )
        else:
            raise NotImplementedError(f'Solver {self.solver_type=} not implemented in {type(self).__name__}!')

        if info != 0:
            self.logger.warning(f'{self.solver_type} not converged! {info=}')

        sol_hat = self.spectral.u_init_forward
        sol_hat[...] = (self.Pr @ _sol_hat).reshape(sol_hat.shape)

        if self.spectral_space:
            return sol_hat
        else:
            sol = self.spectral.u_init
            sol[:] = self.spectral.itransform(sol_hat).real

            if self.spectral.debug:
                self.spectral.check_BCs(sol)

            return sol

    def setUpFieldsIO(self):
        """Set up the MPI decomposition information for ``Rectilinear`` file output."""
        Rectilinear.setupMPI(
            comm=self.comm,
            iLoc=[me.start for me in self.local_slice(False)],
            nLoc=[me.stop - me.start for me in self.local_slice(False)],
        )

    def getOutputFile(self, fileName):
        """Create and initialize a ``Rectilinear`` output file with the 1D grids of all axes as coordinates."""
        self.setUpFieldsIO()

        coords = [me.get_1dgrid() for me in self.spectral.axes]
        assert np.allclose([len(me) for me in coords], self.spectral.global_shape[1:])

        fOut = Rectilinear(np.float64, fileName=fileName)
        fOut.setHeader(nVar=len(self.components), coords=coords)
        fOut.initialize()
        return fOut

    def processSolutionForOutput(self, u):
        """Return the solution as a real array in physical space, transforming back from spectral space if necessary."""
        if self.spectral_space:
            return np.array(self.itransform(u).real)
        else:
            return np.array(u.real)


def compute_residual_DAE(self, stage=''):
    """
    Computation of the residual that does not add u_0 - u_m in algebraic equations.

    Args:
        stage (str): The current stage of the step the level belongs to
    """

    # get current level and problem description
    L = self.level

    # Check if we want to skip the residual computation to gain performance
    # Keep in mind that skipping any residual computation is likely to give incorrect outputs of the residual!
    if stage in self.params.skip_residual_computation:
        L.status.residual = 0.0 if L.status.residual is None else L.status.residual
        return None

    # check if there are new values (e.g. from a sweep)
    # assert L.status.updated

    # compute the residual for each node

    # build QF(u)
    res_norm = []
    res = self.integrate()
    mask = L.prob.diff_mask
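    # in the loop below, u_0 - u_m is added only in the differential components; algebraic equations keep just the integral term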

    for m in range(self.coll.num_nodes):
        res[m][mask] += L.u[0][mask] - L.u[m + 1][mask]
        # add tau if associated
        if L.tau[m] is not None:
            res[m] += L.tau[m]
        # use abs function from data type here
        res_norm.append(abs(res[m]))

    # find maximal residual over the nodes
    if L.params.residual_type == 'full_abs':
        L.status.residual = max(res_norm)
    elif L.params.residual_type == 'last_abs':
        L.status.residual = res_norm[-1]
    elif L.params.residual_type == 'full_rel':
        L.status.residual = max(res_norm) / abs(L.u[0])
    elif L.params.residual_type == 'last_rel':
        L.status.residual = res_norm[-1] / abs(L.u[0])
    else:
        raise ParameterError(
            f'residual_type = {L.params.residual_type} not implemented, choose '
            f'full_abs, last_abs, full_rel or last_rel instead'
        )

    # indicate that the residual has seen the new values
    L.status.updated = False

    return None


def compute_residual_DAE_MPI(self, stage=None):
    """
    Computation of the residual using the collocation matrix Q
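    Variant of ``compute_residual_DAE`` for MPI-parallel sweepers, where each rank holds the solution at one
    collocation node.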

427 

428 Args: 

429 stage (str): The current stage of the step the level belongs to 

430 """ 

431 from mpi4py import MPI 

432 

433 L = self.level 

434 

435 # Check if we want to skip the residual computation to gain performance 

436 # Keep in mind that skipping any residual computation is likely to give incorrect outputs of the residual! 

437 if stage in self.params.skip_residual_computation: 

438 L.status.residual = 0.0 if L.status.residual is None else L.status.residual 

439 return None 

440 

441 # compute the residual for each node 

442 

443 # build QF(u) 

444 res = self.integrate(last_only=L.params.residual_type[:4] == 'last') 

445 mask = L.prob.diff_mask 

446 res[mask] += L.u[0][mask] - L.u[self.rank + 1][mask] 

447 # add tau if associated 

448 if L.tau[self.rank] is not None: 

449 res += L.tau[self.rank] 

450 # use abs function from data type here 

451 res_norm = abs(res) 

452 

453 # find maximal residual over the nodes 

454 if L.params.residual_type == 'full_abs': 

455 L.status.residual = self.comm.allreduce(res_norm, op=MPI.MAX) 

456 elif L.params.residual_type == 'last_abs': 

457 L.status.residual = self.comm.bcast(res_norm, root=self.comm.size - 1) 

458 elif L.params.residual_type == 'full_rel': 

459 L.status.residual = self.comm.allreduce(res_norm / abs(L.u[0]), op=MPI.MAX) 

460 elif L.params.residual_type == 'last_rel': 

461 L.status.residual = self.comm.bcast(res_norm / abs(L.u[0]), root=self.comm.size - 1) 

462 else: 

463 raise NotImplementedError(f'residual type \"{L.params.residual_type}\" not implemented!') 

464 

465 # indicate that the residual has seen the new values 

466 L.status.updated = False 

467 

468 return None 

469 

470 

def get_extrapolated_error_DAE(self, S, **kwargs):
    """
    The extrapolation estimate combines values of u and f from multiple steps to extrapolate and compare to the
    solution obtained by the time marching scheme. This function can be used in `EstimateExtrapolationError`.

    Args:
        S (pySDC.Step): The current step

    Returns:
        None
    """
    u_ex = self.get_extrapolated_solution(S)
    diff_mask = S.levels[0].prob.diff_mask
    if u_ex is not None:
        S.levels[0].status.error_extrapolation_estimate = (
            abs((u_ex - S.levels[0].u[-1])[diff_mask]) * self.coeff.prefactor
        )
    else:
        S.levels[0].status.error_extrapolation_estimate = None