Coverage for pySDC/implementations/sweeper_classes/boris_2nd_order.py: 99%

118 statements  

« prev     ^ index     » next       coverage.py v7.5.0, created at 2024-04-29 09:02 +0000

1import numpy as np 

2 

3from pySDC.core.Sweeper import sweeper 

4 

5 

6class boris_2nd_order(sweeper): 

7 """ 

8 Custom sweeper class, implements Sweeper.py 

9 

10 Second-order sweeper using velocity-Verlet with Boris scheme as base integrator 

11 

12 Attributes: 

13 S: node-to-node collocation matrix (first order) 

14 SQ: node-to-node collocation matrix (second order) 

15 ST: node-to-node trapezoidal matrix 

16 Sx: node-to-node Euler half-step for position update 

17 """ 

18 

19 def __init__(self, params): 

20 """ 

21 Initialization routine for the custom sweeper 

22 

23 Args: 

24 params: parameters for the sweeper 

25 """ 

26 

27 # call parent's initialization routine 

28 

29 if "QI" not in params: 

30 params["QI"] = "IE" 

31 if "QE" not in params: 

32 params["QE"] = "EE" 

33 

34 super(boris_2nd_order, self).__init__(params) 

35 

36 # S- and SQ-matrices (derived from Q) and Sx- and ST-matrices for the integrator 

37 [ 

38 self.S, 

39 self.ST, 

40 self.SQ, 

41 self.Sx, 

42 self.QQ, 

43 self.QI, 

44 self.QT, 

45 self.Qx, 

46 self.Q, 

47 ] = self.__get_Qd() 

48 

49 self.qQ = np.dot(self.coll.weights, self.coll.Qmat[1:, 1:]) 

50 

51 def __get_Qd(self): 

52 """ 

53 Get integration matrices for 2nd-order SDC 

54 

55 Returns: 

56 S: node-to-node collocation matrix (first order) 

57 SQ: node-to-node collocation matrix (second order) 

58 ST: node-to-node trapezoidal matrix 

59 Sx: node-to-node Euler half-step for position update 

60 """ 

61 

62 # set implicit and explicit Euler matrices (default, but can be changed) 

63 QI = self.get_Qdelta_implicit(self.coll, qd_type=self.params.QI) 

64 QE = self.get_Qdelta_explicit(self.coll, qd_type=self.params.QE) 

65 

66 # trapezoidal rule 

67 QT = 1 / 2 * (QI + QE) 

68 

69 # Qx as in the paper 

70 Qx = np.dot(QE, QT) + 1 / 2 * QE * QE 

71 

72 Sx = np.zeros(np.shape(self.coll.Qmat)) 

73 ST = np.zeros(np.shape(self.coll.Qmat)) 

74 S = np.zeros(np.shape(self.coll.Qmat)) 

75 

76 # fill-in node-to-node matrices 

77 Sx[0, :] = Qx[0, :] 

78 ST[0, :] = QT[0, :] 

79 S[0, :] = self.coll.Qmat[0, :] 

80 for m in range(self.coll.num_nodes): 

81 Sx[m + 1, :] = Qx[m + 1, :] - Qx[m, :] 

82 ST[m + 1, :] = QT[m + 1, :] - QT[m, :] 

83 S[m + 1, :] = self.coll.Qmat[m + 1, :] - self.coll.Qmat[m, :] 

84 # SQ via dot-product, could also be done via QQ 

85 SQ = np.dot(S, self.coll.Qmat) 

86 

87 # QQ-matrix via product of Q 

88 QQ = np.dot(self.coll.Qmat, self.coll.Qmat) 

89 

90 return [S, ST, SQ, Sx, QQ, QI, QT, Qx, self.coll.Qmat] 

91 

92 def update_nodes(self): 

93 """ 

94 Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes 

95 

96 Returns: 

97 None 

98 """ 

99 

100 # get current level and problem description 

101 L = self.level 

102 P = L.prob 

103 

104 # only if the level has been touched before 

105 assert L.status.unlocked 

106 

107 # get number of collocation nodes for easier access 

108 M = self.coll.num_nodes 

109 

110 # initialize integral terms with zeros, will add stuff later 

111 integral = [P.dtype_u(P.init, val=0.0) for l in range(M)] 

112 

113 # gather all terms which are known already (e.g. from the previous iteration) 

114 # this corresponds to SF(u^k) - SdF(u^k) + tau (note: have integrals in pos and vel!) 

115 for m in range(M): 

116 for j in range(M + 1): 

117 # build RHS from f-terms (containing the E field) and the B field 

118 f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1]) 

119 # add SQF(u^k) - SxF(u^k) for the position 

120 integral[m].pos += L.dt * (L.dt * (self.SQ[m + 1, j] - self.Sx[m + 1, j]) * f) 

121 # add SF(u^k) - STF(u^k) for the velocity 

122 integral[m].vel += L.dt * (self.S[m + 1, j] - self.ST[m + 1, j]) * f 

123 # add tau if associated 

124 if L.tau[m] is not None: 

125 integral[m] += L.tau[m] 

126 # tau is 0-to-node, need to change it to node-to-node here 

127 if m > 0: 

128 integral[m] -= L.tau[m - 1] 

129 

130 # do the sweep 

131 for m in range(0, M): 

132 # build rhs, consisting of the known values from above and new values from previous nodes (at k+1) 

133 tmp = P.dtype_u(integral[m]) 

134 for j in range(m + 1): 

135 # build RHS from f-terms (containing the E field) and the B field 

136 f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1]) 

137 # add SxF(u^{k+1}) 

138 tmp.pos += L.dt * (L.dt * self.Sx[m + 1, j] * f) 

139 # add pos at previous node + dt*v0 

140 tmp.pos += L.u[m].pos + L.dt * self.coll.delta_m[m] * L.u[0].vel 

141 # set new position, is explicit 

142 L.u[m + 1].pos = tmp.pos 

143 

144 # get E field with new positions and compute mean 

145 L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m]) 

146 

147 ck = tmp.vel 

148 

149 # do the boris scheme 

150 L.u[m + 1].vel = P.boris_solver(ck, L.dt * np.diag(self.QI)[m + 1], L.f[m], L.f[m + 1], L.u[m]) 

151 

152 # indicate presence of new values at this level 

153 L.status.updated = True 

154 

155 return None 

156 

157 def integrate(self): 

158 """ 

159 Integrates the right-hand side 

160 

161 Returns: 

162 list of dtype_u: containing the integral as values 

163 """ 

164 

165 # get current level and problem description 

166 L = self.level 

167 P = L.prob 

168 

169 # create new instance of dtype_u, initialize values with 0 

170 p = [] 

171 

172 for m in range(1, self.coll.num_nodes + 1): 

173 p.append(P.dtype_u(P.init, val=0.0)) 

174 

175 # integrate RHS over all collocation nodes, RHS is here only f(x,v)! 

176 for j in range(1, self.coll.num_nodes + 1): 

177 f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1]) 

178 p[-1].pos += L.dt * (L.dt * self.QQ[m, j] * f) + L.dt * self.coll.Qmat[m, j] * L.u[0].vel 

179 p[-1].vel += L.dt * self.coll.Qmat[m, j] * f 

180 

181 return p 

182 

183 def compute_end_point(self): 

184 """ 

185 Compute u at the right point of the interval 

186 

187 The value uend computed here is a full evaluation of the Picard formulation (always!) 

188 

189 Returns: 

190 None 

191 """ 

192 

193 # get current level and problem description 

194 L = self.level 

195 P = L.prob 

196 

197 # start with u0 and add integral over the full interval (using coll.weights) 

198 L.uend = P.dtype_u(L.u[0]) 

199 for m in range(self.coll.num_nodes): 

200 f = P.build_f(L.f[m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]) 

201 L.uend.pos += L.dt * (L.dt * self.qQ[m] * f) + L.dt * self.coll.weights[m] * L.u[0].vel 

202 L.uend.vel += L.dt * self.coll.weights[m] * f 

203 # add up tau correction of the full interval (last entry) 

204 if L.tau[-1] is not None: 

205 L.uend += L.tau[-1] 

206 

207 return None 

208 

209 def get_sweeper_mats(self): 

210 """ 

211 Returns the matrices Q, QQ, Qx, QT which define the sweeper. 

212 """ 

213 

214 Q = self.Q[1:, 1:] 

215 QQ = self.QQ[1:, 1:] 

216 Qx = self.Qx[1:, 1:] 

217 QT = self.QT[1:, 1:] 

218 

219 return Q, QQ, Qx, QT 

220 

221 def get_scalar_problems_sweeper_mats(self, lambdas=None): 

222 """ 

223 This function returns the corresponding matrices of an SDC sweep matrix formulation 

224 

225 Args: 

226 lambdas (numpy.narray): the first entry in lambdas is k-spring constant and the second is mu friction. 

227 """ 

228 

229 Q, QQ, Qx, QT = self.get_sweeper_mats() 

230 

231 if lambdas is None: 

232 pass 

233 # should use lambdas from attached problem and make sure it is scalar SDC 

234 raise NotImplementedError("At the moment, the values for the lambda have to be provided") 

235 else: 

236 k = lambdas[0] 

237 mu = lambdas[1] 

238 

239 nnodes = self.coll.num_nodes 

240 dt = self.level.dt 

241 

242 F = np.block( 

243 [ 

244 [-k * np.eye(nnodes), -mu * np.eye(nnodes)], 

245 [-k * np.eye(nnodes), -mu * np.eye(nnodes)], 

246 ] 

247 ) 

248 

249 C_coll = np.block([[np.eye(nnodes), dt * Q], [np.zeros([nnodes, nnodes]), np.eye(nnodes)]]) 

250 Q_coll = np.block( 

251 [ 

252 [dt**2 * QQ, np.zeros([nnodes, nnodes])], 

253 [np.zeros([nnodes, nnodes]), dt * Q], 

254 ] 

255 ) 

256 Q_vv = np.block( 

257 [ 

258 [dt**2 * Qx, np.zeros([nnodes, nnodes])], 

259 [np.zeros([nnodes, nnodes]), dt * QT], 

260 ] 

261 ) 

262 M_vv = np.eye(2 * nnodes) - np.dot(Q_vv, F) 

263 

264 return C_coll, Q_coll, Q_vv, M_vv, F 

265 

266 def get_scalar_problems_manysweep_mats(self, nsweeps, lambdas=None): 

267 """ 

268 For a scalar problem, K sweeps of SDC can be written in matrix form. 

269 

270 Args: 

271 nsweeps (int): number of sweeps 

272 lambdas (numpy.ndarray): the first entry in lambdas is k-spring constant and the second is mu friction. 

273 """ 

274 nnodes = self.coll.num_nodes 

275 

276 C_coll, Q_coll, Q_vv, M_vv, F = self.get_scalar_problems_sweeper_mats(lambdas=lambdas) 

277 

278 K_sdc = np.dot(np.linalg.inv(M_vv), Q_coll - Q_vv) @ F 

279 

280 Keig, Kvec = np.linalg.eig(K_sdc) 

281 

282 Kp_sdc = np.linalg.matrix_power(K_sdc, nsweeps) 

283 Kinv_sdc = np.linalg.inv(np.eye(2 * nnodes) - K_sdc) 

284 

285 Kdot_sdc = np.dot(np.eye(2 * nnodes) - Kp_sdc, Kinv_sdc) 

286 MC = np.dot(np.linalg.inv(M_vv), C_coll) 

287 

288 Mat_sweep = Kp_sdc + np.dot(Kdot_sdc, MC) 

289 

290 return Mat_sweep, np.max(np.abs(Keig)) 

291 

292 def get_scalar_problems_picardsweep_mats(self, nsweeps, lambdas=None): 

293 """ 

294 For a scalar problem, K sweeps of SDC can be written in matrix form. 

295 

296 Args: 

297 nsweeps (int): number of sweeps 

298 lambdas (numpy.ndarray): the first entry in lambdas is k-spring constant and the second is mu friction. 

299 """ 

300 nnodes = self.coll.num_nodes 

301 

302 C_coll, Q_coll, Q_vv, M_vv, F = self.get_scalar_problems_sweeper_mats(lambdas=lambdas) 

303 

304 K_sdc = np.dot(Q_coll, F) 

305 

306 Keig, Kvec = np.linalg.eig(K_sdc) 

307 

308 Kp_sdc = np.linalg.matrix_power(K_sdc, nsweeps) 

309 Kinv_sdc = np.linalg.inv(np.eye(2 * nnodes) - K_sdc) 

310 

311 Kdot_sdc = np.dot(np.eye(2 * nnodes) - Kp_sdc, Kinv_sdc) 

312 

313 Mat_sweep = Kp_sdc + np.dot(Kdot_sdc, C_coll) 

314 

315 return Mat_sweep, np.max(np.abs(Keig))