Coverage for pySDC/implementations/sweeper_classes/boris_2nd_order.py: 99%

118 statements  

coverage.py v7.10.6, created at 2025-09-04 15:08 +0000

import numpy as np

from pySDC.core.sweeper import Sweeper


class boris_2nd_order(Sweeper):
    """
    Custom sweeper class, implements the abstract Sweeper class

    Second-order sweeper using velocity-Verlet with the Boris scheme as base integrator

    Attributes:
        S: node-to-node collocation matrix (first order)
        SQ: node-to-node collocation matrix (second order)
        ST: node-to-node trapezoidal matrix
        Sx: node-to-node Euler half-step for position update
    """
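
    # Typical usage sketch (illustrative only, not part of this module): the sweeper is picked
    # via a pySDC controller description dict; the problem class named here is a hypothetical
    # choice and must provide build_f(), eval_f() and boris_solver() as used below. Remaining
    # description keys (problem_params, step_params, ...) are omitted.
    #
    #     from pySDC.implementations.problem_classes.PenningTrap_3D import penningtrap
    #
    #     description = {
    #         'problem_class': penningtrap,
    #         'sweeper_class': boris_2nd_order,
    #         'sweeper_params': {'num_nodes': 3},  # 'QI'/'QE' default to 'IE'/'EE' in __init__
    #         'level_params': {'dt': 0.015625},
    #     }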

    def __init__(self, params, level):
        """
        Initialization routine for the custom sweeper

        Args:
            params: parameters for the sweeper
            level (pySDC.Level.level): the level that uses this sweeper
        """

        if "QI" not in params:
            params["QI"] = "IE"
        if "QE" not in params:
            params["QE"] = "EE"

        super(boris_2nd_order, self).__init__(params, level)

        # S- and SQ-matrices (derived from Q) and Sx- and ST-matrices for the integrator
        [
            self.S,
            self.ST,
            self.SQ,
            self.Sx,
            self.QQ,
            self.QI,
            self.QT,
            self.Qx,
            self.Q,
        ] = self.__get_Qd()

        self.qQ = np.dot(self.coll.weights, self.coll.Qmat[1:, 1:])

    def __get_Qd(self):
        """
        Get integration matrices for 2nd-order SDC

        Returns:
            S: node-to-node collocation matrix (first order)
            SQ: node-to-node collocation matrix (second order)
            ST: node-to-node trapezoidal matrix
            Sx: node-to-node Euler half-step for position update
        """

        # set implicit and explicit Euler matrices (default, but can be changed)
        QI = self.get_Qdelta_implicit(qd_type=self.params.QI)
        QE = self.get_Qdelta_explicit(qd_type=self.params.QE)

        # trapezoidal rule
        QT = 1 / 2 * (QI + QE)

        # Qx as in the paper
        Qx = np.dot(QE, QT) + 1 / 2 * QE * QE

        Sx = np.zeros(np.shape(self.coll.Qmat))
        ST = np.zeros(np.shape(self.coll.Qmat))
        S = np.zeros(np.shape(self.coll.Qmat))

        # fill-in node-to-node matrices
        Sx[0, :] = Qx[0, :]
        ST[0, :] = QT[0, :]
        S[0, :] = self.coll.Qmat[0, :]
        for m in range(self.coll.num_nodes):
            Sx[m + 1, :] = Qx[m + 1, :] - Qx[m, :]
            ST[m + 1, :] = QT[m + 1, :] - QT[m, :]
            S[m + 1, :] = self.coll.Qmat[m + 1, :] - self.coll.Qmat[m, :]
        # SQ via dot-product, could also be done via QQ
        SQ = np.dot(S, self.coll.Qmat)

        # QQ-matrix via product of Q
        QQ = np.dot(self.coll.Qmat, self.coll.Qmat)

        return [S, ST, SQ, Sx, QQ, QI, QT, Qx, self.coll.Qmat]
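
    # Standalone numeric sketch of the node-to-node construction above (made-up values, not from
    # any particular quadrature rule): S holds row differences of Q, so cumulative sums of the
    # rows of S recover the 0-to-node matrix again.
    #
    #     import numpy as np
    #
    #     Q = np.array([[0.0, 0.00, 0.00],
    #                   [0.0, 0.25, -0.05],
    #                   [0.0, 0.45, 0.30]])  # hypothetical 0-to-node weights, first row zero
    #     S = np.zeros_like(Q)
    #     S[0, :] = Q[0, :]
    #     for m in range(2):
    #         S[m + 1, :] = Q[m + 1, :] - Q[m, :]
    #     assert np.allclose(np.cumsum(S, axis=0), Q)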

    def update_nodes(self):
        """
        Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes

        Returns:
            None
        """

        # get current level and problem description
        L = self.level
        P = L.prob

        # only if the level has been touched before
        assert L.status.unlocked

        # get number of collocation nodes for easier access
        M = self.coll.num_nodes

        # initialize integral terms with zeros, will add stuff later
        integral = [P.dtype_u(P.init, val=0.0) for _ in range(M)]

        # gather all terms which are known already (e.g. from the previous iteration)
        # this corresponds to SF(u^k) - SdF(u^k) + tau (note: have integrals in pos and vel!)
        for m in range(M):
            for j in range(M + 1):
                # build RHS from f-terms (containing the E field) and the B field
                f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1])
                # add SQF(u^k) - SxF(u^k) for the position
                integral[m].pos += L.dt * (L.dt * (self.SQ[m + 1, j] - self.Sx[m + 1, j]) * f)
                # add SF(u^k) - STF(u^k) for the velocity
                integral[m].vel += L.dt * (self.S[m + 1, j] - self.ST[m + 1, j]) * f
            # add tau if associated
            if L.tau[m] is not None:
                integral[m] += L.tau[m]
                # tau is 0-to-node, need to change it to node-to-node here
                if m > 0:
                    integral[m] -= L.tau[m - 1]

        # do the sweep
        for m in range(0, M):
            # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
            tmp = P.dtype_u(integral[m])
            for j in range(m + 1):
                # build RHS from f-terms (containing the E field) and the B field
                f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1])
                # add SxF(u^{k+1})
                tmp.pos += L.dt * (L.dt * self.Sx[m + 1, j] * f)
            # add pos at previous node + dt*v0
            tmp.pos += L.u[m].pos + L.dt * self.coll.delta_m[m] * L.u[0].vel
            # set new position, is explicit
            L.u[m + 1].pos = tmp.pos

            # get E field with new positions and compute mean
            L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])

            ck = tmp.vel

            # do the boris scheme
            L.u[m + 1].vel = P.boris_solver(ck, L.dt * np.diag(self.QI)[m + 1], L.f[m], L.f[m + 1], L.u[m])

        # indicate presence of new values at this level
        L.status.updated = True

        return None
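
    # In formulas (a paraphrase of the loops above, not quoted from the Boris-SDC paper): with
    # F_j^k = build_f(f_j^k, u_j^k) and node distances delta_m, one sweep performs for m = 0..M-1
    #
    #     x_{m+1}^{k+1} = x_m^{k+1} + dt * delta_m * v_0
    #                     + dt^2 * sum_j (SQ - Sx)_{m+1,j} F_j^k
    #                     + dt^2 * sum_{j<=m} Sx_{m+1,j} F_j^{k+1}
    #     v_{m+1}^{k+1} = boris_solver(c_k, dt * QI_{m+1,m+1}, f_m, f_{m+1}, u_m)
    #     with  c_k     = dt * sum_j (S - ST)_{m+1,j} F_j^k   (plus node-to-node tau corrections)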

    def integrate(self):
        """
        Integrates the right-hand side

        Returns:
            list of dtype_u: containing the integral as values
        """

        # get current level and problem description
        L = self.level
        P = L.prob

        # create new instance of dtype_u, initialize values with 0
        p = []

        for m in range(1, self.coll.num_nodes + 1):
            p.append(P.dtype_u(P.init, val=0.0))

            # integrate RHS over all collocation nodes, RHS is here only f(x,v)!
            for j in range(1, self.coll.num_nodes + 1):
                f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1])
                p[-1].pos += L.dt * (L.dt * self.QQ[m, j] * f) + L.dt * self.coll.Qmat[m, j] * L.u[0].vel
                p[-1].vel += L.dt * self.coll.Qmat[m, j] * f

        return p
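
    # Written out (a sketch of what the loop above accumulates; it uses that the row sums of the
    # collocation matrix Q equal the node positions tau_m on the unit interval):
    #
    #     p[m].pos = dt * tau_m * v_0 + dt^2 * (QQ F)_m      with QQ = Q @ Q
    #     p[m].vel = dt * (Q F)_m
    #
    # i.e. a double integral of the force for the positions and a single integral for the
    # velocities.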

    def compute_end_point(self):
        """
        Compute u at the right point of the interval

        The value uend computed here is a full evaluation of the Picard formulation (always!)

        Returns:
            None
        """

        # get current level and problem description
        L = self.level
        P = L.prob

        # start with u0 and add integral over the full interval (using coll.weights)
        L.uend = P.dtype_u(L.u[0])
        for m in range(self.coll.num_nodes):
            f = P.build_f(L.f[m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
            L.uend.pos += L.dt * (L.dt * self.qQ[m] * f) + L.dt * self.coll.weights[m] * L.u[0].vel
            L.uend.vel += L.dt * self.coll.weights[m] * f
        # add up tau correction of the full interval (last entry)
        if L.tau[-1] is not None:
            L.uend += L.tau[-1]

        return None
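
    # The assembled end-point value, written out (a sketch; w are the collocation weights, which
    # sum to one on the unit interval, and qQ = w^T Q was precomputed in __init__):
    #
    #     x_end = x_0 + dt * v_0 + dt^2 * sum_m qQ_m * F_m
    #     v_end = v_0 + dt * sum_m w_m * F_m
    #
    # plus the tau correction over the full interval, if present.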

    def get_sweeper_mats(self):
        """
        Returns the matrices Q, QQ, Qx, QT which define the sweeper.
        """

        Q = self.Q[1:, 1:]
        QQ = self.QQ[1:, 1:]
        Qx = self.Qx[1:, 1:]
        QT = self.QT[1:, 1:]

        return Q, QQ, Qx, QT

    def get_scalar_problems_sweeper_mats(self, lambdas=None):
        """
        This function returns the matrices of an SDC sweep in matrix formulation for the scalar test problem defined by lambdas

        Args:
            lambdas (numpy.ndarray): the first entry in lambdas is the spring constant k and the second is the friction coefficient mu.
        """

        Q, QQ, Qx, QT = self.get_sweeper_mats()

        if lambdas is None:
            # should use lambdas from attached problem and make sure it is scalar SDC
            raise NotImplementedError("At the moment, the values for the lambda have to be provided")
        else:
            k = lambdas[0]
            mu = lambdas[1]

        nnodes = self.coll.num_nodes
        dt = self.level.dt

        F = np.block(
            [
                [-k * np.eye(nnodes), -mu * np.eye(nnodes)],
                [-k * np.eye(nnodes), -mu * np.eye(nnodes)],
            ]
        )

        C_coll = np.block([[np.eye(nnodes), dt * Q], [np.zeros([nnodes, nnodes]), np.eye(nnodes)]])
        Q_coll = np.block(
            [
                [dt**2 * QQ, np.zeros([nnodes, nnodes])],
                [np.zeros([nnodes, nnodes]), dt * Q],
            ]
        )
        Q_vv = np.block(
            [
                [dt**2 * Qx, np.zeros([nnodes, nnodes])],
                [np.zeros([nnodes, nnodes]), dt * QT],
            ]
        )
        M_vv = np.eye(2 * nnodes) - np.dot(Q_vv, F)

        return C_coll, Q_coll, Q_vv, M_vv, F
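
    # How these blocks are combined further below (a paraphrase of get_scalar_problems_manysweep_mats,
    # not quoted from a reference): with U = (x_1..x_M, v_1..v_M) and U_0 the initial value spread
    # to all nodes, one velocity-Verlet SDC sweep in matrix form reads
    #
    #     M_vv U^{k+1} = (Q_coll - Q_vv) F U^k + C_coll U_0
    #
    # so the iteration matrix is K = M_vv^{-1} (Q_coll - Q_vv) F.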

    def get_scalar_problems_manysweep_mats(self, nsweeps, lambdas=None):
        """
        For a scalar problem, K sweeps of SDC can be written in matrix form.

        Args:
            nsweeps (int): number of sweeps
            lambdas (numpy.ndarray): the first entry in lambdas is the spring constant k and the second is the friction coefficient mu.

        Returns:
            the matrix propagating the initial data through nsweeps sweeps, and the spectral radius of the SDC iteration matrix
        """
        nnodes = self.coll.num_nodes

        C_coll, Q_coll, Q_vv, M_vv, F = self.get_scalar_problems_sweeper_mats(lambdas=lambdas)

        K_sdc = np.dot(np.linalg.inv(M_vv), Q_coll - Q_vv) @ F

        Keig, Kvec = np.linalg.eig(K_sdc)

        Kp_sdc = np.linalg.matrix_power(K_sdc, nsweeps)
        Kinv_sdc = np.linalg.inv(np.eye(2 * nnodes) - K_sdc)

        Kdot_sdc = np.dot(np.eye(2 * nnodes) - Kp_sdc, Kinv_sdc)
        MC = np.dot(np.linalg.inv(M_vv), C_coll)

        Mat_sweep = Kp_sdc + np.dot(Kdot_sdc, MC)

        return Mat_sweep, np.max(np.abs(Keig))
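
    # Minimal usage sketch (hypothetical parameter values; assumes a fully set-up sweeper whose
    # level provides dt, as required by get_scalar_problems_sweeper_mats):
    #
    #     Mat_sweep, rho = sweeper.get_scalar_problems_manysweep_mats(nsweeps=3, lambdas=np.array([1.0, 0.1]))
    #     if rho < 1:
    #         print("SDC iteration contracts for this (k, mu) pair")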

    def get_scalar_problems_picardsweep_mats(self, nsweeps, lambdas=None):
        """
        For a scalar problem, K Picard sweeps can be written in matrix form.

        Args:
            nsweeps (int): number of sweeps
            lambdas (numpy.ndarray): the first entry in lambdas is the spring constant k and the second is the friction coefficient mu.

        Returns:
            the matrix propagating the initial data through nsweeps Picard sweeps, and the spectral radius of the Picard iteration matrix
        """
        nnodes = self.coll.num_nodes

        C_coll, Q_coll, Q_vv, M_vv, F = self.get_scalar_problems_sweeper_mats(lambdas=lambdas)

        K_sdc = np.dot(Q_coll, F)

        Keig, Kvec = np.linalg.eig(K_sdc)

        Kp_sdc = np.linalg.matrix_power(K_sdc, nsweeps)
        Kinv_sdc = np.linalg.inv(np.eye(2 * nnodes) - K_sdc)

        Kdot_sdc = np.dot(np.eye(2 * nnodes) - Kp_sdc, Kinv_sdc)

        Mat_sweep = Kp_sdc + np.dot(Kdot_sdc, C_coll)

        return Mat_sweep, np.max(np.abs(Keig))