Coverage for pySDC/helpers/fieldsIO.py: 69%
322 statements
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generic utility classes to write and read Cartesian grid field solutions into binary files.
It implements the base file handler class :class:`FieldsIO`, which is specialized into:

- :class:`Scalar` : for 0D fields (scalars) with a given number of variables
- :class:`Rectilinear` : for fields on N-dimensional rectilinear grids

While each file handler needs to be set up with specific parameters (grid, ...),
each written file can be read using the same interface implemented in the
base abstract class.

Example
-------
>>> import numpy as np
>>> from pySDC.helpers.fieldsIO import Rectilinear
>>>
>>> # Write some fields in files
>>> x = np.linspace(0, 1, 128)
>>> y = np.linspace(0, 1, 64)
>>> fOut = Rectilinear(np.float64, "file.pysdc")
>>> fOut.setHeader(nVar=2, coords=[x, y])
>>> fOut.initialize()
>>> times = [0, 1, 2]
>>> xGrid, yGrid = np.meshgrid(x, y, indexing="ij")
>>> u0 = np.array([-1, 1]).reshape((-1, 1, 1))*xGrid*yGrid
>>> # u0 has shape [2, nX, nY]
>>> for t in times:
>>>     fOut.addField(t, t*u0)
>>>
>>> # Read the file using the generic interface
>>> from pySDC.helpers.fieldsIO import FieldsIO
>>> fIn = FieldsIO.fromFile("file.pysdc")
>>> times = fIn.times
>>> assert len(times) == fIn.nFields
>>> tEnd, uEnd = fIn.readField(-1)
>>> assert tEnd == times[-1]

Notes
-----
🚀 :class:`Rectilinear` is compatible with an MPI-based Cartesian decomposition.
See :class:`pySDC.helpers.fieldsIO.writeFields_MPI` for an illustrative example.

Warning
-------
To use MPI collective writing, you need to first call the class method :class:`Rectilinear.setupMPI` (cf. its docstring).
Also, `Rectilinear.setHeader` **must be given the global grid coordinates**, whether the code is run in parallel or not.
"""
import os
import numpy as np
from typing import Type, TypeVar
import logging
import itertools

T = TypeVar("T")

try:
    try:
        import dolfin as df  # noqa: F841 (for some reason, dolfin always needs to be imported before mpi4py)
    except ImportError:
        pass
    from mpi4py import MPI
except ImportError:

    class MPI:
        COMM_WORLD = None
        Intracomm = T
# Supported data types
DTYPES = {
    0: np.float64,  # double precision
    1: np.complex128,
}
try:
    DTYPES.update(
        {
            2: np.float128,  # quadruple precision
            3: np.complex256,
        }
    )
except AttributeError:
    logging.getLogger('FieldsIO').debug('Warning: Quadruple precision not available on this machine')
try:
    DTYPES.update(
        {
            4: np.float32,  # single precision
            5: np.complex64,
        }
    )
except AttributeError:
    logging.getLogger('FieldsIO').debug('Warning: Single precision not available on this machine')

DTYPES_AVAIL = {val: key for key, val in DTYPES.items()}

# Header dtype
H_DTYPE = np.int8
T_DTYPE = np.float64
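
# For reference, the on-disk layout produced by this module (inferred from hBase,
# hInfos and addField below; the size of the structure-specific header depends on
# the subclass):
#
#   [sID: int8][dtype key: int8]                      <- base header (hBase)
#   [structure-specific header (hInfos)]              <- e.g. nVar (int64) for Scalar;
#                                                        nVar, dim, gridSizes (int32) + coords (float64) for Rectilinear
#   [t_0: T_DTYPE][field_0: nItems values of dtype]   <- one record per addField call
#   [t_1: T_DTYPE][field_1: nItems values of dtype]
#   ...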


class FieldsIO:
    """Abstract IO file handler"""

    STRUCTS = {}  # supported structures, modified dynamically
    sID = None  # structure ID of the FieldsIO class, modified dynamically

    tSize = T_DTYPE().itemsize

    ALLOW_OVERWRITE = False

    def __init__(self, dtype, fileName):
        """
        Parameters
        ----------
        dtype : np.dtype
            The data type of the field values.
        fileName : str
            Name of the binary file.
        """
        assert dtype in DTYPES_AVAIL, f"{dtype=} not available. Supported on this machine: {list(DTYPES_AVAIL.keys())}"
        self.dtype = dtype
        self.fileName = fileName
        self.initialized = False

        # Initialized by the setHeader abstract method
        self.header = None
        self.nItems = None  # number of values (dof) stored into one field

    def __str__(self):
        return f"FieldsIO[{self.__class__.__name__}|{self.dtype.__name__}|file:{self.fileName}]<{hex(id(self))}>"

    def __repr__(self):
        return self.__str__()

    @classmethod
    def fromFile(cls, fileName):
        """
        Read a file storing fields, and return the `FieldsIO` of the appropriate
        field type (structure).

        Parameters
        ----------
        fileName : str
            Name of the binary file.

        Returns
        -------
        fieldsIO : :class:`FieldsIO`
            The specialized `FieldsIO` adapted to the file.
        """
        assert os.path.isfile(fileName), f"not a file ({fileName})"
        with open(fileName, "rb") as f:
            STRUCT, DTYPE = np.fromfile(f, dtype=H_DTYPE, count=2)
            fieldsIO: FieldsIO = cls.STRUCTS[STRUCT](DTYPES[DTYPE], fileName)
            fieldsIO.readHeader(f)
            fieldsIO.initialized = True
        return fieldsIO
    @property
    def hBase(self) -> np.ndarray:
        """Base header in numpy array format"""
        return np.array([self.sID, DTYPES_AVAIL[self.dtype]], dtype=H_DTYPE)

    @classmethod
    def register(cls, sID):
        """
        Decorator used to register a new specialized FieldsIO class

        Parameters
        ----------
        sID : int
            Unique identifier for the file structure, used in the binary file.
            Since it's encoded on an 8-bit signed integer,
            it must be between -128 and 127

        Example
        -------
        >>> # New specialized FieldsIO class
        >>> @FieldsIO.register(sID=31)
        >>> class HexaMesh2D(FieldsIO):
        >>>     pass  # ... implementation
        """

        def wrapper(registered: Type[T]) -> Type[T]:
            assert (
                sID not in cls.STRUCTS
            ), f"struct ID already taken by {cls.STRUCTS[sID]}, cannot use it for {registered}"
            cls.STRUCTS[sID] = registered
            registered.sID = sID
            return registered

        return wrapper
    def initialize(self):
        """Initialize the file handler: create the file with header, removing any existing file with the same name"""
        assert self.header is not None, "header must be set before initializing FieldsIO"
        assert not self.initialized, "FieldsIO already initialized"

        if not self.ALLOW_OVERWRITE:
            assert not os.path.isfile(
                self.fileName
            ), f"file {self.fileName!r} already exists, use FieldsIO.ALLOW_OVERWRITE = True to allow overwriting"

        with open(self.fileName, "w+b") as f:
            self.hBase.tofile(f)
            for array in self.hInfos:
                array.tofile(f)
        self.initialized = True

    def setHeader(self, **params):
        """(Abstract) Set the header before creating a new file to store the fields"""
        raise NotImplementedError()

    @property
    def hInfos(self) -> list[np.ndarray]:
        """(Abstract) Arrays representing the grid structure to be written in the binary file."""
        raise NotImplementedError()

    def readHeader(self, f):
        """
        (Abstract) Read the header from the file storing the fields.

        Parameters
        ----------
        f : `_io.BufferedReader`
            File to read the header from (opened in binary mode).
        """
        raise NotImplementedError()

    @property
    def hSize(self):
        """Size of the full header (in bytes)"""
        return self.hBase.nbytes + sum(hInfo.nbytes for hInfo in self.hInfos)

    @property
    def itemSize(self):
        """Size of one field value (in bytes)"""
        return self.dtype().itemsize

    @property
    def fSize(self):
        """Full size of a field (in bytes)"""
        return self.nItems * self.itemSize

    @property
    def fileSize(self):
        """Current size of the file (in bytes)"""
        return os.path.getsize(self.fileName)
    def addField(self, time, field):
        """
        Append one field solution at the end of the file with one given time.

        Parameters
        ----------
        time : float-like
            The associated time of the field solution.
        field : np.ndarray
            The field values.
        """
        assert self.initialized, "cannot add field to a non initialized FieldsIO"
        field = np.asarray(field)
        assert field.dtype == self.dtype, f"expected {self.dtype} dtype, got {field.dtype}"
        assert field.size == self.nItems, f"expected {self.nItems} values, got {field.size}"
        with open(self.fileName, "ab") as f:
            np.array(time, dtype=T_DTYPE).tofile(f)
            field.tofile(f)

    @property
    def nFields(self):
        """Number of fields currently stored in the binary file"""
        return int((self.fileSize - self.hSize) // (self.tSize + self.fSize))
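
    # Worked example (illustrative numbers, not tied to any particular run): for a
    # Rectilinear file with nVar=2 on a 128x64 float64 grid,
    # hSize = 2 + 4*(2 + 2) + 8*(128 + 64) = 1554 bytes and
    # fSize = 2*128*64*8 = 131072 bytes, so each record adds tSize + fSize = 131080 bytes
    # and nFields = (fileSize - 1554) // 131080.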
    def formatIndex(self, idx):
        """Utility method to format a field index into a positional integer (negative starts from the last field index, like python lists)"""
        nFields = self.nFields
        if idx < 0:
            idx = nFields + idx
        assert idx < nFields, f"cannot read index {idx} from {nFields} fields"
        assert idx >= 0, f"cannot read index {idx-nFields} from {nFields} fields"
        return idx

    @property
    def times(self):
        """Vector of all times stored in the binary file"""
        times = []
        with open(self.fileName, "rb") as f:
            f.seek(self.hSize)
            for i in range(self.nFields):
                t = np.fromfile(f, dtype=T_DTYPE, count=1, offset=0 if i == 0 else self.fSize)[0]
                times.append(float(t))
        return times

    def time(self, idx):
        """Time stored at a given field index"""
        idx = self.formatIndex(idx)
        offset = self.hSize + idx * (self.tSize + self.fSize)
        with open(self.fileName, "rb") as f:
            t = np.fromfile(f, dtype=T_DTYPE, count=1, offset=offset)[0]
        return float(t)

    def readField(self, idx):
        """
        Read one field stored in the binary file, corresponding to the given
        time index.

        Parameters
        ----------
        idx : int
            Positional index of the field.

        Returns
        -------
        t : float
            Stored time for this field.
        field : np.ndarray
            Read fields in a numpy array.
        """
        idx = self.formatIndex(idx)
        offset = self.hSize + idx * (self.tSize + self.fSize)
        with open(self.fileName, "rb") as f:
            f.seek(offset)
            t = float(np.fromfile(f, dtype=T_DTYPE, count=1)[0])
            field = np.fromfile(f, dtype=self.dtype, count=self.nItems)
        self.reshape(field)
        return t, field

    def reshape(self, field):
        """Reshape the field to match the grid structure, if needed (no-op for the base class)"""
        pass


@FieldsIO.register(sID=0)
class Scalar(FieldsIO):
    """FieldsIO handler storing a given number of scalar variables"""

    # -------------------------------------------------------------------------
    # Overridden methods
    # -------------------------------------------------------------------------
    def setHeader(self, nVar):
        """
        Set the descriptive grid structure to be stored in the file header.

        Parameters
        ----------
        nVar : int
            Number of scalar variables stored.
        """
        self.header = {"nVar": int(nVar)}
        self.nItems = self.nVar

    @property
    def hInfos(self):
        """Array representing the grid structure to be written in the binary file."""
        return [np.array([self.nVar], dtype=np.int64)]

    def readHeader(self, f):
        """
        Read the header from the binary file storing the fields.

        Parameters
        ----------
        f : `_io.BufferedReader`
            File to read the header from (opened in binary mode).
        """
        (nVar,) = np.fromfile(f, dtype=np.int64, count=1)
        self.setHeader(nVar)

    # -------------------------------------------------------------------------
    # Class specifics
    # -------------------------------------------------------------------------
    @property
    def nVar(self):
        """Number of variables in a field, as described in the header"""
        return self.header["nVar"]
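
# Minimal usage sketch for Scalar (illustrative only: the file name "energies.pysdc"
# and the three stored values below are made up, not part of the library):
#
# >>> import numpy as np
# >>> from pySDC.helpers.fieldsIO import Scalar, FieldsIO
# >>> fOut = Scalar(np.float64, "energies.pysdc")
# >>> fOut.setHeader(nVar=3)
# >>> fOut.initialize()
# >>> for t in [0.0, 0.1, 0.2]:
# ...     fOut.addField(t, np.array([t, 2 * t, 3 * t]))
# >>> tLast, uLast = FieldsIO.fromFile("energies.pysdc").readField(-1)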


@FieldsIO.register(sID=1)
class Rectilinear(Scalar):
    """FieldsIO handler storing a given number of scalar variables on an N-dimensional rectilinear grid"""

    @staticmethod
    def setupCoords(*coords):
        """Utility function to set up the grids in multiple dimensions, given the coordinate arrays of each dimension"""
        coords = [np.asarray(coord, dtype=np.float64) for coord in coords]
        for axis, coord in enumerate(coords):
            assert coord.ndim == 1, f"coord for {axis=} must be one dimensional"
        return coords

    # -------------------------------------------------------------------------
    # Overridden methods
    # -------------------------------------------------------------------------
    def setHeader(self, nVar, coords):
        """
        Set the descriptive grid structure to be stored in the file header.

        Parameters
        ----------
        nVar : int
            Number of variables stored on the grid.
        coords : np.1darray or list[np.1darray]
            The grid coordinates in each dimension.

        Note
        ----
        When used in MPI decomposition mode, all coordinates **must** be those of the global grid.
        """
        if not isinstance(coords, (tuple, list)):
            coords = [coords]
        coords = self.setupCoords(*coords)
        self.header = {"nVar": int(nVar), "coords": coords}
        self.nItems = nVar * self.nDoF

    @property
    def hInfos(self):
        """Arrays representing the grid structure to be written in the binary file."""
        return [np.array([self.nVar, self.dim, *self.gridSizes], dtype=np.int32)] + [
            np.array(coord, dtype=np.float64) for coord in self.header["coords"]
        ]

    def readHeader(self, f):
        """
        Read the header from the binary file storing the fields.

        Parameters
        ----------
        f : `_io.BufferedReader`
            File to read the header from (opened in binary mode).
        """
        nVar, dim = np.fromfile(f, dtype=np.int32, count=2)
        gridSizes = np.fromfile(f, dtype=np.int32, count=dim)
        coords = [np.fromfile(f, dtype=np.float64, count=n) for n in gridSizes]
        self.setHeader(nVar, coords)

    def reshape(self, fields: np.ndarray):
        """Reshape the fields to an N-dimensional array (in-place operation)"""
        fields.shape = (self.nVar, *self.gridSizes)

    # -------------------------------------------------------------------------
    # Class specifics
    # -------------------------------------------------------------------------
    @property
    def gridSizes(self):
        """Number of grid points in each dimension"""
        return [coord.size for coord in self.header["coords"]]

    @property
    def dim(self):
        """Number of grid dimensions"""
        return len(self.gridSizes)

    @property
    def nDoF(self):
        """Number of degrees of freedom for one variable"""
        return np.prod(self.gridSizes)
    def toVTR(self, baseName, varNames, idxFormat="{:06d}"):
        """
        Convert all 3D fields stored in binary format (FieldsIO) into a list
        of VTR files, that can be read later with Paraview or an equivalent
        tool to make videos.

        Parameters
        ----------
        baseName : str
            Base name of the VTR file.
        varNames : list[str]
            Variable names of the fields.
        idxFormat : str, optional
            Formatting string for the index of the VTR file.
            The default is "{:06d}".

        Example
        -------
        >>> # Suppose the FieldsIO object is already written into outputs.pysdc
        >>> import os
        >>> from pySDC.helpers.fieldsIO import Rectilinear
        >>> os.makedirs("vtrFiles")  # to store all VTR files into a subfolder
        >>> Rectilinear.fromFile("outputs.pysdc").toVTR(
        >>>     baseName="vtrFiles/field", varNames=["u", "v", "w", "T", "p"])
        """
        assert self.dim == 3, "can only be used with 3D fields"
        from pySDC.helpers.vtkIO import writeToVTR

        template = f"{baseName}_{idxFormat}"
        for i in range(self.nFields):
            _, u = self.readField(i)
            writeToVTR(template.format(i), u, self.header["coords"], varNames)
    # -------------------------------------------------------------------------
    # MPI-parallel implementation
    # -------------------------------------------------------------------------
    comm: MPI.Intracomm = None
    _nCollectiveIO = None

    @classmethod
    def setupMPI(cls, comm: MPI.Intracomm, iLoc, nLoc):
        """
        Setup the MPI mode for the file IO, considering a decomposition
        of the grid into contiguous sub-domains (one block per process).

        Parameters
        ----------
        comm : MPI.Intracomm
            The space decomposition communicator.
        iLoc : list[int]
            Starting index of the local sub-domain in the global coordinates.
        nLoc : list[int]
            Number of points in the local sub-domain.
        """
        cls.comm = comm
        cls.iLoc = iLoc
        cls.nLoc = nLoc
        cls.mpiFile = None
        cls._nCollectiveIO = None
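
    # Hedged usage sketch for the MPI mode (a condensed version of writeFields_MPI at
    # the bottom of this file; grid sizes, nVar, file name and the decomposition
    # algorithm `algo` are illustrative assumptions, see pySDC.helpers.blocks.BlockDecomposition):
    #
    # >>> from mpi4py import MPI
    # >>> from pySDC.helpers.blocks import BlockDecomposition
    # >>> comm = MPI.COMM_WORLD
    # >>> gridSizes = [256, 64]
    # >>> blocks = BlockDecomposition(comm.Get_size(), gridSizes, algo, comm.Get_rank())
    # >>> iLoc, nLoc = blocks.localBounds
    # >>> Rectilinear.setupMPI(comm, iLoc, nLoc)     # must be called before initialize
    # >>> fOut = Rectilinear(np.float64, "parallel.pysdc")
    # >>> fOut.setHeader(nVar=2, coords=[np.linspace(0, 1, n) for n in gridSizes])  # global coords
    # >>> fOut.initialize()
    # >>> fOut.addField(0.0, uLocal)                 # uLocal has shape (2, *nLoc)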
    @property
    def nCollectiveIO(self):
        """
        Number of collective IO operations over all processes, when reading or writing a field.

        Returns
        -------
        int
            Number of collective IO accesses
        """
        if self._nCollectiveIO is None:
            self._nCollectiveIO = self.comm.allreduce(self.nVar * np.prod(self.nLoc[:-1]), op=MPI.MAX)
        return self._nCollectiveIO

    @property
    def MPI_ON(self):
        """Whether or not MPI is activated"""
        if self.comm is None:
            return False
        return self.comm.Get_size() > 1

    @property
    def MPI_ROOT(self):
        """Whether or not the process is the MPI root"""
        if self.comm is None:
            return True
        return self.comm.Get_rank() == 0
    def MPI_FILE_OPEN(self, mode):
        """Open the binary file in MPI mode"""
        amode = {
            "r": MPI.MODE_RDONLY,
            "a": MPI.MODE_WRONLY | MPI.MODE_APPEND,
        }[mode]
        self.mpiFile = MPI.File.Open(self.comm, self.fileName, amode)

    def MPI_WRITE(self, data):
        """Write data (np.ndarray) in the binary file in MPI mode, at the current file cursor position."""
        self.mpiFile.Write(data)

    def MPI_WRITE_AT_ALL(self, offset, data: np.ndarray):
        """
        Write data in the binary file in MPI mode, with a given offset
        **relative to the beginning of the file**.

        Parameters
        ----------
        offset : int
            Offset to write at, relative to the beginning of the file, in bytes.
        data : np.ndarray
            Data to be written in the binary file.
        """
        self.mpiFile.Write_at_all(offset, data)

    def MPI_READ_AT_ALL(self, offset, data: np.ndarray):
        """
        Read data from the binary file in MPI mode, with a given offset
        **relative to the beginning of the file**.

        Parameters
        ----------
        offset : int
            Offset to read at, relative to the beginning of the file, in bytes.
        data : np.ndarray
            Array on which to read the data from the binary file.
        """
        self.mpiFile.Read_at_all(offset, data)

    def MPI_FILE_CLOSE(self):
        """Close the binary file in MPI mode"""
        self.mpiFile.Close()
        self.mpiFile = None
    def initialize(self):
        """Initialize the binary file (write header) in MPI mode"""
        if self.MPI_ROOT:
            try:
                super().initialize()
            except AssertionError as e:
                if self.MPI_ON:
                    print(f"{type(e)}: {e}")
                    self.comm.Abort()
                else:
                    raise e

        if self.MPI_ON:
            self.comm.Barrier()  # Important, should not be removed !
        self.initialized = True
    def addField(self, time, field):
        """
        Append one field solution at the end of the file with one given time,
        possibly using MPI.

        Parameters
        ----------
        time : float-like
            The associated time of the field solution.
        field : np.ndarray
            The (local) field values.

        Note
        ----
        If an MPI decomposition is used, field **must be** the local field values.
        """
        if not self.MPI_ON:
            return super().addField(time, field)

        assert self.initialized, "cannot add field to a non initialized FieldsIO"

        field = np.asarray(field)
        assert field.dtype == self.dtype, f"expected {self.dtype} dtype, got {field.dtype}"
        assert field.shape == (
            self.nVar,
            *self.nLoc,
        ), f"expected {(self.nVar, *self.nLoc)} shape, got {field.shape}"

        offset0 = self.fileSize
        self.MPI_FILE_OPEN(mode="a")
        nWrites = 0
        nCollectiveIO = self.nCollectiveIO

        if self.MPI_ROOT:
            self.MPI_WRITE(np.array(time, dtype=T_DTYPE))
        offset0 += self.tSize

        for (iVar, *iBeg) in itertools.product(range(self.nVar), *[range(n) for n in self.nLoc[:-1]]):
            offset = offset0 + self.iPos(iVar, iBeg) * self.itemSize
            self.MPI_WRITE_AT_ALL(offset, field[(iVar, *iBeg)])
            nWrites += 1

        for _ in range(nCollectiveIO - nWrites):
            # Additional collective write to catch up with other processes
            self.MPI_WRITE_AT_ALL(offset0, field[:0])

        self.MPI_FILE_CLOSE()
    def iPos(self, iVar, iX):
        """Global flat index (in items) of the first value of the local last-axis row for variable iVar and local multi-index iX"""
        iPos = iVar * self.nDoF
        for axis in range(self.dim - 1):
            iPos += (self.iLoc[axis] + iX[axis]) * np.prod(self.gridSizes[axis + 1 :])
        iPos += self.iLoc[-1]
        return iPos
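
    # Worked example (hypothetical decomposition, not taken from any test): with
    # gridSizes = [4, 6] (nDoF = 24) and a local block iLoc = [1, 2], nLoc = [2, 3],
    # the local row field[1, 0, :] (nLoc[-1] = 3 contiguous values along the last axis)
    # starts at iPos(1, [0]) = 1*24 + (1 + 0)*6 + 2 = 32 items from the beginning of
    # the field record, i.e. at byte offset offset0 + 32 * itemSize in addField above.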
    def readField(self, idx):
        """
        Read one field stored in the binary file, corresponding to the given
        time index, using MPI when a space parallel decomposition is used.

        Parameters
        ----------
        idx : int
            Positional index of the field.

        Returns
        -------
        t : float
            Stored time for this field.
        field : np.ndarray
            Read (local) fields in a numpy array.

        Note
        ----
        If an MPI decomposition is used, it reads and returns the local field values only.
        """
        if not self.MPI_ON:
            return super().readField(idx)

        idx = self.formatIndex(idx)
        offset0 = self.hSize + idx * (self.tSize + self.fSize)
        with open(self.fileName, "rb") as f:
            t = float(np.fromfile(f, dtype=T_DTYPE, count=1, offset=offset0)[0])
        offset0 += self.tSize

        field = np.empty((self.nVar, *self.nLoc), dtype=self.dtype)

        self.MPI_FILE_OPEN(mode="r")
        nReads = 0
        nCollectiveIO = self.nCollectiveIO

        for (iVar, *iBeg) in itertools.product(range(self.nVar), *[range(n) for n in self.nLoc[:-1]]):
            offset = offset0 + self.iPos(iVar, iBeg) * self.itemSize
            self.MPI_READ_AT_ALL(offset, field[(iVar, *iBeg)])
            nReads += 1

        for _ in range(nCollectiveIO - nReads):
            # Additional collective read to catch up with other processes
            self.MPI_READ_AT_ALL(offset0, field[:0])

        self.MPI_FILE_CLOSE()

        return t, field


# -----------------------------------------------------------------------------------------------
# Utility functions used for testing
# -----------------------------------------------------------------------------------------------
def initGrid(nVar, gridSizes):
    dim = len(gridSizes)
    coords = [np.linspace(0, 1, num=n, endpoint=False) for n in gridSizes]
    s = [None] * dim
    u0 = np.array(np.arange(nVar) + 1)[(slice(None), *s)]
    for x in np.meshgrid(*coords, indexing="ij"):
        u0 = u0 * x
    return coords, u0


def writeFields_MPI(fileName, dtypeIdx, algo, nSteps, nVar, gridSizes):
    coords, u0 = initGrid(nVar, gridSizes)

    from mpi4py import MPI
    from pySDC.helpers.blocks import BlockDecomposition
    from pySDC.helpers.fieldsIO import Rectilinear

    comm = MPI.COMM_WORLD
    MPI_SIZE = comm.Get_size()
    MPI_RANK = comm.Get_rank()

    blocks = BlockDecomposition(MPI_SIZE, gridSizes, algo, MPI_RANK)

    iLoc, nLoc = blocks.localBounds
    Rectilinear.setupMPI(comm, iLoc, nLoc)
    s = [slice(i, i + n) for i, n in zip(iLoc, nLoc)]
    u0 = u0[(slice(None), *s)]

    f1 = Rectilinear(DTYPES[dtypeIdx], fileName)
    f1.setHeader(nVar=nVar, coords=coords)

    u0 = np.asarray(u0, dtype=f1.dtype)
    f1.initialize()

    times = np.arange(nSteps) / nSteps
    for t in times:
        ut = (u0 * t).astype(f1.dtype)
        f1.addField(t, ut)

    return u0


def compareFields_MPI(fileName, u0, nSteps):
    from pySDC.helpers.fieldsIO import FieldsIO

    comm = MPI.COMM_WORLD
    MPI_RANK = comm.Get_rank()
    if MPI_RANK == 0:
        print("Comparing fields with MPI")

    f2 = FieldsIO.fromFile(fileName)

    times = np.arange(nSteps) / nSteps
    for idx, t in enumerate(times):
        u1 = u0 * t
        t2, u2 = f2.readField(idx)
        assert t2 == t, f"fields[{idx}] in {f2} has incorrect time ({t2} instead of {t})"
        assert u2.shape == u1.shape, f"{idx}'s fields in {f2} has incorrect shape"
        assert np.allclose(u2, u1), f"{idx}'s fields in {f2} has incorrect values"
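

# Hedged example of how the two testing utilities above can be chained when running
# under MPI (e.g. with `mpirun -n 4`). The file name, grid sizes and the decomposition
# algorithm passed as `algo` are illustrative assumptions only (see
# pySDC.helpers.blocks.BlockDecomposition for the supported algorithms):
#
# >>> u0 = writeFields_MPI("test.pysdc", dtypeIdx=0, algo="Hybrid", nSteps=3, nVar=2, gridSizes=[64, 32])
# >>> compareFields_MPI("test.pysdc", u0, nSteps=3)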