Coverage for pySDC/implementations/sweeper_classes/boris_2nd_order.py: 99% (118 statements)
import numpy as np

from pySDC.core.sweeper import Sweeper


class boris_2nd_order(Sweeper):
    """
    Custom sweeper class, implements Sweeper.py

    Second-order sweeper using velocity-Verlet with Boris scheme as base integrator

    Attributes:
        S: node-to-node collocation matrix (first order)
        SQ: node-to-node collocation matrix (second order)
        ST: node-to-node trapezoidal matrix
        Sx: node-to-node Euler half-step for position update
    """

    def __init__(self, params, level):
        """
        Initialization routine for the custom sweeper

        Args:
            params: parameters for the sweeper
            level (pySDC.Level.level): the level that uses this sweeper
        """
        if "QI" not in params:
            params["QI"] = "IE"
        if "QE" not in params:
            params["QE"] = "EE"

        super(boris_2nd_order, self).__init__(params, level)

        # S- and SQ-matrices (derived from Q) and Sx- and ST-matrices for the integrator
        [
            self.S,
            self.ST,
            self.SQ,
            self.Sx,
            self.QQ,
            self.QI,
            self.QT,
            self.Qx,
            self.Q,
        ] = self.__get_Qd()
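
        # qQ = weights^T * Qmat (nodes only): combined weights for the dt^2-scaled
        # position quadrature used in compute_end_point()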
        self.qQ = np.dot(self.coll.weights, self.coll.Qmat[1:, 1:])

    def __get_Qd(self):
        """
        Get integration matrices for 2nd-order SDC

        Returns:
            S: node-to-node collocation matrix (first order)
            SQ: node-to-node collocation matrix (second order)
            ST: node-to-node trapezoidal matrix
            Sx: node-to-node Euler half-step for position update
        """
        # set implicit and explicit Euler matrices (default, but can be changed)
        QI = self.get_Qdelta_implicit(qd_type=self.params.QI)
        QE = self.get_Qdelta_explicit(qd_type=self.params.QE)

        # trapezoidal rule
        QT = 1 / 2 * (QI + QE)

        # Qx as in the paper
        Qx = np.dot(QE, QT) + 1 / 2 * QE * QE
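        # note: QE * QE is an elementwise product, so Qx = QE @ QT + 1/2 * QE**2 acts as
        # the explicit low-order counterpart of QQ for the position update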

        Sx = np.zeros(np.shape(self.coll.Qmat))
        ST = np.zeros(np.shape(self.coll.Qmat))
        S = np.zeros(np.shape(self.coll.Qmat))

        # fill-in node-to-node matrices
        Sx[0, :] = Qx[0, :]
        ST[0, :] = QT[0, :]
        S[0, :] = self.coll.Qmat[0, :]
        for m in range(self.coll.num_nodes):
            Sx[m + 1, :] = Qx[m + 1, :] - Qx[m, :]
            ST[m + 1, :] = QT[m + 1, :] - QT[m, :]
            S[m + 1, :] = self.coll.Qmat[m + 1, :] - self.coll.Qmat[m, :]
        # SQ via dot-product, could also be done via QQ
        SQ = np.dot(S, self.coll.Qmat)

        # QQ-matrix via product of Q
        QQ = np.dot(self.coll.Qmat, self.coll.Qmat)

        return [S, ST, SQ, Sx, QQ, QI, QT, Qx, self.coll.Qmat]

    def update_nodes(self):
        """
        Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes

        Returns:
            None
        """
        # get current level and problem description
        L = self.level
        P = L.prob

        # only if the level has been touched before
        assert L.status.unlocked

        # get number of collocation nodes for easier access
        M = self.coll.num_nodes

        # initialize integral terms with zeros, will add stuff later
        integral = [P.dtype_u(P.init, val=0.0) for l in range(M)]

        # gather all terms which are known already (e.g. from the previous iteration)
        # this corresponds to SF(u^k) - SdF(u^k) + tau (note: have integrals in pos and vel!)
        for m in range(M):
            for j in range(M + 1):
                # build RHS from f-terms (containing the E field) and the B field
                f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1])
                # add SQF(u^k) - SxF(u^k) for the position
                integral[m].pos += L.dt * (L.dt * (self.SQ[m + 1, j] - self.Sx[m + 1, j]) * f)
                # add SF(u^k) - STF(u^k) for the velocity
                integral[m].vel += L.dt * (self.S[m + 1, j] - self.ST[m + 1, j]) * f
            # add tau if associated
            if L.tau[m] is not None:
                integral[m] += L.tau[m]
                # tau is 0-to-node, need to change it to node-to-node here
                if m > 0:
                    integral[m] -= L.tau[m - 1]
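
        # at this point, integral[m] holds the node-to-node terms that stay fixed during
        # the sweep: dt^2 * (SQ - Sx) F(u^k) for the positions, dt * (S - ST) F(u^k) for
        # the velocities, plus the (node-to-node) tau correction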

        # do the sweep
        for m in range(0, M):
            # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
            tmp = P.dtype_u(integral[m])
            for j in range(m + 1):
                # build RHS from f-terms (containing the E field) and the B field
                f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1])
                # add SxF(u^{k+1})
                tmp.pos += L.dt * (L.dt * self.Sx[m + 1, j] * f)
            # add pos at previous node + dt*v0
            tmp.pos += L.u[m].pos + L.dt * self.coll.delta_m[m] * L.u[0].vel
            # set new position, is explicit
            L.u[m + 1].pos = tmp.pos

            # get E field with new positions and compute mean
            L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])

            ck = tmp.vel
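            # ck gathers all explicitly known velocity contributions; it is the constant
            # part of the implicit velocity update handled by the Boris trick below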

            # do the boris scheme
            L.u[m + 1].vel = P.boris_solver(ck, L.dt * np.diag(self.QI)[m + 1], L.f[m], L.f[m + 1], L.u[m])

        # indicate presence of new values at this level
        L.status.updated = True

        return None

    def integrate(self):
        """
        Integrates the right-hand side

        Returns:
            list of dtype_u: containing the integral as values
        """
        # get current level and problem description
        L = self.level
        P = L.prob

        # create new instance of dtype_u, initialize values with 0
        p = []

        for m in range(1, self.coll.num_nodes + 1):
            p.append(P.dtype_u(P.init, val=0.0))

            # integrate RHS over all collocation nodes, RHS is here only f(x,v)!
            for j in range(1, self.coll.num_nodes + 1):
                f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1])
                p[-1].pos += L.dt * (L.dt * self.QQ[m, j] * f) + L.dt * self.coll.Qmat[m, j] * L.u[0].vel
                p[-1].vel += L.dt * self.coll.Qmat[m, j] * f

        return p

    def compute_end_point(self):
        """
        Compute u at the right point of the interval

        The value uend computed here is a full evaluation of the Picard formulation (always!)

        Returns:
            None
        """
        # get current level and problem description
        L = self.level
        P = L.prob

        # start with u0 and add integral over the full interval (using coll.weights)
        L.uend = P.dtype_u(L.u[0])
        for m in range(self.coll.num_nodes):
            f = P.build_f(L.f[m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
            L.uend.pos += L.dt * (L.dt * self.qQ[m] * f) + L.dt * self.coll.weights[m] * L.u[0].vel
            L.uend.vel += L.dt * self.coll.weights[m] * f
        # add up tau correction of the full interval (last entry)
        if L.tau[-1] is not None:
            L.uend += L.tau[-1]

        return None

    def get_sweeper_mats(self):
        """
        Returns the matrices Q, QQ, Qx, QT which define the sweeper.
        """
        Q = self.Q[1:, 1:]
        QQ = self.QQ[1:, 1:]
        Qx = self.Qx[1:, 1:]
        QT = self.QT[1:, 1:]

        return Q, QQ, Qx, QT

    def get_scalar_problems_sweeper_mats(self, lambdas=None):
        """
        This function returns the corresponding matrices of an SDC sweep in matrix formulation

        Args:
            lambdas (numpy.ndarray): the first entry in lambdas is the spring constant k, the second the friction coefficient mu.
        """
        Q, QQ, Qx, QT = self.get_sweeper_mats()

        if lambdas is None:
            # should use lambdas from attached problem and make sure it is scalar SDC
            raise NotImplementedError("At the moment, the values for the lambda have to be provided")
        else:
            k = lambdas[0]
            mu = lambdas[1]

        nnodes = self.coll.num_nodes
        dt = self.level.dt

        F = np.block(
            [
                [-k * np.eye(nnodes), -mu * np.eye(nnodes)],
                [-k * np.eye(nnodes), -mu * np.eye(nnodes)],
            ]
        )

        C_coll = np.block([[np.eye(nnodes), dt * Q], [np.zeros([nnodes, nnodes]), np.eye(nnodes)]])
        Q_coll = np.block(
            [
                [dt**2 * QQ, np.zeros([nnodes, nnodes])],
                [np.zeros([nnodes, nnodes]), dt * Q],
            ]
        )
        Q_vv = np.block(
            [
                [dt**2 * Qx, np.zeros([nnodes, nnodes])],
                [np.zeros([nnodes, nnodes]), dt * QT],
            ]
        )
        M_vv = np.eye(2 * nnodes) - np.dot(Q_vv, F)

        return C_coll, Q_coll, Q_vv, M_vv, F

    def get_scalar_problems_manysweep_mats(self, nsweeps, lambdas=None):
        """
        For a scalar problem, K sweeps of SDC can be written in matrix form.

        Args:
            nsweeps (int): number of sweeps
            lambdas (numpy.ndarray): the first entry in lambdas is the spring constant k, the second the friction coefficient mu.
        """
        nnodes = self.coll.num_nodes

        C_coll, Q_coll, Q_vv, M_vv, F = self.get_scalar_problems_sweeper_mats(lambdas=lambdas)

        K_sdc = np.dot(np.linalg.inv(M_vv), Q_coll - Q_vv) @ F

        Keig, Kvec = np.linalg.eig(K_sdc)

        Kp_sdc = np.linalg.matrix_power(K_sdc, nsweeps)
        Kinv_sdc = np.linalg.inv(np.eye(2 * nnodes) - K_sdc)
        Kdot_sdc = np.dot(np.eye(2 * nnodes) - Kp_sdc, Kinv_sdc)
        MC = np.dot(np.linalg.inv(M_vv), C_coll)

        Mat_sweep = Kp_sdc + np.dot(Kdot_sdc, MC)

        return Mat_sweep, np.max(np.abs(Keig))

    def get_scalar_problems_picardsweep_mats(self, nsweeps, lambdas=None):
        """
        For a scalar problem, K sweeps of Picard iteration can be written in matrix form.

        Args:
            nsweeps (int): number of sweeps
            lambdas (numpy.ndarray): the first entry in lambdas is the spring constant k, the second the friction coefficient mu.
        """
        nnodes = self.coll.num_nodes

        C_coll, Q_coll, Q_vv, M_vv, F = self.get_scalar_problems_sweeper_mats(lambdas=lambdas)

        K_sdc = np.dot(Q_coll, F)

        Keig, Kvec = np.linalg.eig(K_sdc)

        Kp_sdc = np.linalg.matrix_power(K_sdc, nsweeps)
        Kinv_sdc = np.linalg.inv(np.eye(2 * nnodes) - K_sdc)
        Kdot_sdc = np.dot(np.eye(2 * nnodes) - Kp_sdc, Kinv_sdc)

        Mat_sweep = Kp_sdc + np.dot(Kdot_sdc, C_coll)

        return Mat_sweep, np.max(np.abs(Keig))