Coverage for pySDC/implementations/sweeper_classes/verlet.py: 88%

73 statements  

coverage.py v7.5.0, created at 2024-04-29 09:02 +0000

import numpy as np

from pySDC.core.Sweeper import sweeper


class verlet(sweeper):
    """
    Custom sweeper class, implements Sweeper.py

    Second-order sweeper using velocity-Verlet as base integrator

    Attributes:
        QQ: 0-to-node collocation matrix (second order)
        QT: 0-to-node trapezoidal matrix
        Qx: 0-to-node Euler half-step for position update
        qQ: update rule for final value (if needed)
    """

    def __init__(self, params):
        """
        Initialization routine for the custom sweeper

        Args:
            params: parameters for the sweeper
        """

        if 'QI' not in params:
            params['QI'] = 'IE'
        if 'QE' not in params:
            params['QE'] = 'EE'

        # call parent's initialization routine
        super(verlet, self).__init__(params)

        # Trapezoidal rule, Qx and Double-Q as in the Boris paper
        [self.QT, self.Qx, self.QQ] = self.__get_Qd()

        self.qQ = np.dot(self.coll.weights, self.coll.Qmat[1:, 1:])
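
    # Note (summary of how these matrices are used below): positions are propagated with the
    # second-order matrices (dt^2 * QQ inside integrate(), dt^2 * Qx inside update_nodes()),
    # velocities with the first-order ones (dt * Qmat and dt * QT, respectively). The row
    # vector qQ = weights @ Qmat supplies the dt^2 coefficients for the end-point update in
    # compute_end_point().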

    def __get_Qd(self):
        """
        Get integration matrices for 2nd-order SDC

        Returns:
            QT: 0-to-node trapezoidal matrix
            Qx: 0-to-node Euler half-step for the position update
            QQ: 0-to-node collocation matrix (second order)
        """

        # set implicit and explicit Euler matrices
        QI = self.get_Qdelta_implicit(self.coll, self.params.QI)
        QE = self.get_Qdelta_explicit(self.coll, self.params.QE)

        # trapezoidal rule
        QT = 0.5 * (QI + QE)
        # QT = QI

        # Qx as in the paper
        Qx = np.dot(QE, QT) + 0.5 * QE * QE

        QQ = np.zeros(np.shape(self.coll.Qmat))

        # if we have Gauss-Lobatto nodes, we can use a trick from the book:
        # take the Gauss-Lobatto IIIB matrix and create IIIA out of it
        if self.coll.node_type == 'LEGENDRE' and self.coll.quad_type == 'LOBATTO':
            for m in range(self.coll.num_nodes):
                for n in range(self.coll.num_nodes):
                    QQ[m + 1, n + 1] = self.coll.weights[n] * (
                        1.0 - self.coll.Qmat[n + 1, m + 1] / self.coll.weights[m]
                    )
            QQ = np.dot(self.coll.Qmat, QQ)

        # without Gauss-Lobatto nodes, just square Q (this will not yield a symplectic method, they say)
        else:
            QQ = np.dot(self.coll.Qmat, self.coll.Qmat)

        return [QT, Qx, QQ]
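
    # Construction in short: QT averages the implicit- and explicit-Euler preconditioners,
    # which gives a trapezoidal-rule approximation of Q; Qx combines QE with QT (plus the
    # elementwise 0.5 * QE * QE term) into the Euler half-step used for the position update;
    # QQ approximates the double integral. For Gauss-Lobatto nodes, the IIIB-to-IIIA
    # construction above yields the symplectic variant referred to in the comments; otherwise
    # Q @ Q serves as a (non-symplectic) fallback.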

    def update_nodes(self):
        """
        Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes

        Returns:
            None
        """

        # get current level and problem description
        L = self.level
        P = L.prob

        # only if the level has been touched before
        assert L.status.unlocked

        # get number of collocation nodes for easier access
        M = self.coll.num_nodes

        # gather all terms which are known already (e.g. from the previous iteration)
        # get QF(u^k)
        integral = self.integrate()
        for m in range(M):
            # get -QdF(u^k)_m
            for j in range(1, M + 1):
                integral[m].pos -= L.dt * (L.dt * self.Qx[m + 1, j] * L.f[j])
                integral[m].vel -= L.dt * self.QT[m + 1, j] * L.f[j]

            # add initial value
            integral[m].pos += L.u[0].pos
            integral[m].vel += L.u[0].vel
            # add tau if associated
            if L.tau[m] is not None:
                integral[m] += L.tau[m]

        # do the sweep
        for m in range(0, M):
            # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
            L.u[m + 1] = P.dtype_u(integral[m])
            for j in range(1, m + 1):
                # add QxF(u^{k+1})
                L.u[m + 1].pos += L.dt * (L.dt * self.Qx[m + 1, j] * L.f[j])
                L.u[m + 1].vel += L.dt * self.QT[m + 1, j] * L.f[j]

            # get RHS with new positions
            L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])

            L.u[m + 1].vel += L.dt * self.QT[m + 1, m + 1] * L.f[m + 1]

        # indicate presence of new values at this level
        L.status.updated = True

        # # do the sweep (alternative description)
        # for m in range(0, M):
        #     # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
        #     L.u[m + 1] = P.dtype_u(integral[m])
        #     for j in range(1, m + 1):
        #         # add QxF(u^{k+1})
        #         L.u[m + 1].pos += L.dt * (L.dt * self.Qx[m + 1, j] * L.f[j])
        #
        #     # get RHS with new positions
        #     L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
        #
        # for m in range(0, M):
        #     for n in range(0, M):
        #         L.u[m + 1].vel += L.dt * self.QT[m + 1, n + 1] * L.f[n + 1]
        #
        # # indicate presence of new values at this level
        # L.status.updated = True

        return None
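
    # How the sweep reads: the position at node m is assembled explicitly from already
    # updated nodes j <= m via Qx, and the velocity gets a trapezoidal update through QT,
    # including the diagonal term QT[m+1, m+1] * f[m+1] evaluated at the new position.
    # Because the right-hand side depends on the position only, that last term requires no
    # implicit solve; this mirrors the classic velocity-Verlet update the class is named after.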

    def integrate(self):
        """
        Integrates the right-hand side

        Returns:
            list of dtype_u: containing the integral as values
        """

        # get current level and problem description
        L = self.level
        P = L.prob

        # create new instance of dtype_u, initialize values with 0
        p = []
        for m in range(1, self.coll.num_nodes + 1):
            p.append(P.dtype_u(P.init, val=0.0))

            # integrate RHS over all collocation nodes, RHS is here only f(x)!
            for j in range(1, self.coll.num_nodes + 1):
                p[-1].pos += L.dt * (L.dt * self.QQ[m, j] * L.f[j]) + L.dt * self.coll.Qmat[m, j] * L.u[0].vel
                p[-1].vel += L.dt * self.coll.Qmat[m, j] * L.f[j]

            # we need to set mass and charge here, too, since the code uses the integral to create new particles
            p[-1].m = L.u[0].m
            p[-1].q = L.u[0].q

        return p
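
    # In formulas (with c_m the collocation nodes, i.e. the row sums of Q, and F the vector
    # of f-values at the nodes), the terms above amount to roughly
    #   pos_m ~ dt * c_m * v_0 + dt^2 * (QQ F)_m    and    vel_m ~ dt * (Q F)_m,
    # i.e. the node-wise Picard integrals for the second-order problem x'' = f(x).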

    def compute_end_point(self):
        """
        Compute u at the right point of the interval

        The value uend computed here is a full evaluation of the Picard formulation, unless the
        right endpoint is a collocation node and do_coll_update is disabled (then the last node
        value is simply copied).

        Returns:
            None
        """

        # get current level and problem description
        L = self.level
        P = L.prob

        # start with u0 and add integral over the full interval (using coll.weights)
        if self.coll.right_is_node and not self.params.do_coll_update:
            # a copy is sufficient
            L.uend = P.dtype_u(L.u[-1])
        else:
            L.uend = P.dtype_u(L.u[0])
            for m in range(self.coll.num_nodes):
                L.uend.pos += L.dt * (L.dt * self.qQ[m] * L.f[m + 1]) + L.dt * self.coll.weights[m] * L.u[0].vel
                L.uend.vel += L.dt * self.coll.weights[m] * L.f[m + 1]
            # remember to set mass and charge here, too
            L.uend.m = L.u[0].m
            L.uend.q = L.u[0].q
            # add up tau correction of the full interval (last entry)
            if L.tau[-1] is not None:
                L.uend += L.tau[-1]

        return None
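
For orientation, a minimal usage sketch follows. It assumes pySDC's usual controller_nonMPI / description-dict workflow and the HarmonicOscillator particle problem from pySDC's standard examples; parameter names and defaults may differ between pySDC versions, so treat them as assumptions rather than a verified recipe.

from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.HarmonicOscillator import harmonic_oscillator
from pySDC.implementations.sweeper_classes.verlet import verlet

# wire problem, sweeper and level together (keys as in pySDC's description dict)
description = {
    'problem_class': harmonic_oscillator,  # particle-type problem (pos/vel), parameters left at their defaults
    'problem_params': {},
    'sweeper_class': verlet,               # QI='IE' and QE='EE' are set by the sweeper itself
    'sweeper_params': {'quad_type': 'LOBATTO', 'num_nodes': 3},
    'level_params': {'restol': 1e-10, 'dt': 0.1},
    'step_params': {'maxiter': 20},
}

controller = controller_nonMPI(num_procs=1, controller_params={'logger_level': 30}, description=description)

# initial particle from the problem class, then a short run
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t=0.0)
uend, stats = controller.run(u0=uinit, t0=0.0, Tend=1.0)
print(uend.pos, uend.vel)

With Gauss-Lobatto nodes as chosen above, the sweeper builds QQ via the IIIB-to-IIIA construction in __get_Qd, i.e. the symplectic variant of the base integrator.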