Coverage for pySDC/projects/DAE/run/run_iteration_test.py: 100%
65 statements
« prev ^ index » next coverage.py v7.5.0, created at 2024-04-29 09:02 +0000
1import numpy as np
2import statistics
3import pickle
5from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
6from pySDC.projects.DAE.problems.simple_DAE import simple_dae_1
7from pySDC.projects.DAE.problems.transistor_amplifier import one_transistor_amplifier
8from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE
9from pySDC.projects.DAE.misc.HookClass_DAE import LogGlobalErrorPostStepDifferentialVariable
10from pySDC.helpers.stats_helper import get_sorted
11from pySDC.helpers.stats_helper import filter_stats
def setup():
    """
    Build the parameter dictionaries for the iteration test.

    Returns
    -------
    description : dict
        Hierarchy description (problem, sweeper, level and step parameters).
    controller_params : dict
        Controller configuration (logging level and hook class).
    run_params : dict
        Simulation parameters: time interval, list of iteration limits,
        preconditioner names and collocation node counts to sweep over.
    """
    # Level: residual tolerance disabled (restol < 0) so the iteration count
    # is the only stopping criterion; fixed step size.
    # A step size of 1e-3 was used for the first set of plots (chosen because
    # the three investigated collocation methods had converged there); results
    # for different step sizes showed no real difference.
    level_params = {
        'restol': -1,
        'dt': 1e-4,
    }

    # Read-in for the sweeper class
    sweeper_params = {
        'quad_type': 'RADAU-RIGHT',
    }

    # Read-in for the problem class: absolute termination tolerance for the
    # implicit solver. Exactly how this is used can be adjusted in
    # update_nodes() in the fully implicit sweeper.
    problem_params = {
        'newton_tol': 1e-7,
    }

    # Read-in for the step class; 'maxiter' is filled in later by run()
    step_params = {}

    # Controller configuration
    controller_params = {
        'logger_level': 30,
        'hook_class': LogGlobalErrorPostStepDifferentialVariable,
    }

    # Description dictionary for easy hierarchy creation
    description = {
        'problem_class': simple_dae_1,
        'problem_params': problem_params,
        'sweeper_class': fully_implicit_DAE,
        'sweeper_params': sweeper_params,
        'level_params': level_params,
        'step_params': step_params,
    }

    # Simulation parameters: iteration limits from 4 (inclusive) to 6 (exclusive)
    run_params = {
        't0': 0.0,
        'tend': 0.1,
        'max_iter_list': list(range(4, 6)),
        'qd_list': ['IE', 'LU'],
        'num_nodes_list': [3],
    }

    return description, controller_params, run_params
def run(description, controller_params, run_params):
    """
    Run the simulation for every combination of preconditioner, node count
    and maximum iteration number, collecting error and residual data.

    Parameters
    ----------
    description : dict
        Hierarchy description as produced by setup(); mutated in place to set
        the QI preconditioner, node count and maxiter for each run.
    controller_params : dict
        Controller configuration.
    run_params : dict
        Simulation parameters (time interval, parameter lists to sweep over).

    Returns
    -------
    conv_data : dict
        Nested dict conv_data[qd_type][num_nodes] holding per-max_iter arrays
        of error, residual and mean iteration counts.
    """
    conv_data = {}

    # Hoist invariants out of the nested loops
    max_iters = run_params['max_iter_list']
    t0 = run_params['t0']
    tend = run_params['tend']
    n_runs = len(max_iters)

    for qd_type in run_params['qd_list']:
        description['sweeper_params']['QI'] = qd_type
        conv_data[qd_type] = {}

        for num_nodes in run_params['num_nodes_list']:
            description['sweeper_params']['num_nodes'] = num_nodes
            entry = {
                'error': np.zeros(n_runs, dtype=float),
                'residual': np.zeros(n_runs, dtype=float),
                'niter': np.zeros(n_runs, dtype='int'),
                'max_iter': max_iters,
            }
            conv_data[qd_type][num_nodes] = entry

            for i, max_iter in enumerate(max_iters):
                print('Working on Qdelta=%s -- num. nodes=%i -- max. iter.=%i' % (qd_type, num_nodes, max_iter))
                description['step_params']['maxiter'] = max_iter

                # instantiate the controller for this parameter combination
                controller = controller_nonMPI(
                    num_procs=1, controller_params=controller_params, description=description
                )

                # initial values from the exact solution at t0
                prob = controller.MS[0].levels[0].prob
                uinit = prob.u_exact(t0)

                # call main function to get things done...
                _, stats = controller.run(u0=uinit, t0=t0, Tend=tend)

                # extract per-step errors/residuals and iteration counts
                errors = get_sorted(stats, type='e_global_differential_post_step', sortby='time')
                residuals = get_sorted(stats, type='residual_post_step', sortby='time')
                niters = filter_stats(stats, type='niter')

                # max-norm over all time steps; mean iteration count rounded to int
                entry['error'][i] = np.linalg.norm([e[1] for e in errors], np.inf)
                entry['residual'][i] = np.linalg.norm([r[1] for r in residuals], np.inf)
                entry['niter'][i] = round(statistics.mean(niters.values()))

                print(
                    "Error=",
                    entry['error'][i],
                    " Residual=",
                    entry['residual'][i],
                )

    return conv_data
119if __name__ == "__main__":
120 """
121 Routine to run simple differential-algebraic-equation example with various max iters, preconditioners and collocation node counts
122 In contrast to run_convergence_test.py, in which max iters is set large enough to not be the limiting factor, max iters is varied for a fixed time step and the improvement in the error is measured
123 Error data is stored in a dictionary and then pickled for use with the loglog_plot.py routine
124 """
126 description, controller_params, run_params = setup()
127 conv_data = run(description, controller_params, run_params)
128 pickle.dump(conv_data, open("data/dae_iter_data.p", 'wb'))
129 print("Done")