Coverage for pySDC/helpers/fieldsIO.py: 69%
322 statements
coverage.py v7.8.0, created at 2025-04-18 08:18 +0000
1#!/usr/bin/env python3
2# -*- coding: utf-8 -*-
3"""
4Generic utility classes to write and read Cartesian grid field solutions into binary files.
5It implements the base file handler class :class:`FieldsIO`, which is specialized into:
7- :class:`Scalar` : for 0D fields (scalars) with a given number of variables
8- :class:`Rectilinear` : for fields on N-dimensional rectilinear grids
10While each file handler needs to be set up with specific parameters (grid, ...),
11each written file can be read using the same interface implemented in the
12abstract base class.
14Example
15-------
16>>> import numpy as np
17>>> from pySDC.helpers.fieldsIO import Rectilinear
18>>>
19>>> # Write some fields in files
20>>> x = np.linspace(0, 1, 128)
21>>> y = np.linspace(0, 1, 64)
22>>> fOut = Rectilinear(np.float64, "file.pysdc")
23>>> fOut.setHeader(nVar=2, coords=[x, y])
24>>> fOut.initialize()
25>>> times = [0, 1, 2]
26>>> xGrid, yGrid = np.meshgrid(x, y, indexing="ij")
27>>> u0 = np.array([-1, 1]).reshape((-1, 1, 1))*xGrid*yGrid
28>>> # u0 has shape [2, nX, nY]
29>>> for t in times:
30>>> fOut.addField(t, t*u0)
31>>>
32>>> # Read the file using the generic interface
33>>> from pySDC.helpers.fieldsIO import FieldsIO
34>>> fIn = FieldsIO.fromFile("file.pysdc")
35>>> times = fIn.times
36>>> assert len(times) == fIn.nFields
37>>> tEnd, uEnd = fIn.readField(-1)
38>>> assert tEnd == times[-1]
40Notes
41-----
42🚀 :class:`Rectilinear` is compatible with an MPI-based Cartesian decomposition.
43See :func:`pySDC.helpers.fieldsIO.writeFields_MPI` for an illustrative example.
45Warning
46-------
47To use MPI collective writing, you first need to call the class method :class:`Rectilinear.setupMPI` (cf. its docstring).
48Also, `Rectilinear.setHeader` **must be given the global grid coordinates**, whether the code is run in parallel or not.
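
A minimal sketch of the MPI mode (a decomposition along the first axis only, done by hand here;
iLoc/nLoc would usually come from a domain decomposition utility, and uLocal is a hypothetical
local array of shape (nVar, *nLoc)):

>>> from mpi4py import MPI
>>> comm = MPI.COMM_WORLD
>>> nXLoc = 128 // comm.Get_size()  # assuming 128 is divisible by the number of processes
>>> Rectilinear.setupMPI(comm, iLoc=[comm.Get_rank() * nXLoc, 0], nLoc=[nXLoc, 64])
>>> fOut = Rectilinear(np.float64, "parallel.pysdc")
>>> fOut.setHeader(nVar=2, coords=[np.linspace(0, 1, 128), np.linspace(0, 1, 64)])
>>> fOut.initialize()
>>> fOut.addField(0.0, uLocal)  # each rank passes only its local field values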
49"""
50import os
51import numpy as np
52from typing import Type, TypeVar
53import logging
54import itertools
56T = TypeVar("T")
58try:
59 try:
60 import dolfin as df # noqa: F841 (for some reason, dolfin always needs to be imported before mpi4py)
61 except ImportError:
62 pass
63 from mpi4py import MPI
64except ImportError:
66 class MPI:
67 COMM_WORLD = None
68 Intracomm = T
71# Supported data types
72DTYPES = {
73 0: np.float64, # double precision
74 1: np.complex128,
75}
76try:
77 DTYPES.update(
78 {
79 2: np.float128, # quadruple precision
80 3: np.complex256,
81 }
82 )
83except AttributeError:
84 logging.getLogger('FieldsIO').debug('Warning: Quadruple precision not available on this machine')
85try:
86 DTYPES.update(
87 {
88 4: np.float32, # single precision
89 5: np.complex64,
90 }
91 )
92except AttributeError:
93 logging.getLogger('FieldsIO').debug('Warning: Single precision not available on this machine')
95DTYPES_AVAIL = {val: key for key, val in DTYPES.items()}
97# Header dtype
98H_DTYPE = np.int8
99T_DTYPE = np.float64
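
# Resulting binary file layout, as produced by the initialize/addField methods below:
#
#   [sID : int8][dtypeID : int8]        <- base header (hBase)
#   [hInfos ...]                        <- structure-specific header (e.g. nVar, grid sizes, coords)
#   [t_0 : T_DTYPE][field_0 values]     <- one record appended per addField call
#   [t_1 : T_DTYPE][field_1 values]
#   ...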
102class FieldsIO:
103 """Abstract IO file handler"""
105 STRUCTS = {} # supported structures, modified dynamically
106 sID = None # structure ID of the FieldsIO class, modified dynamically
108 tSize = T_DTYPE().itemsize
110 ALLOW_OVERWRITE = False
112 def __init__(self, dtype, fileName):
113 """
114 Parameters
115 ----------
116 dtype : np.dtype
117 The data type of the fields values.
118 fileName : str
119 Name of the binary file.
120 """
121 assert dtype in DTYPES_AVAIL, f"{dtype=} not available. Supported on this machine: {list(DTYPES_AVAIL.keys())}"
122 self.dtype = dtype
123 self.fileName = fileName
124 self.initialized = False
126 # Initialized by the setHeader abstract method
127 self.header = None
128 self.nItems = None # number of values (dof) stored into one field
130 def __str__(self):
131 return f"FieldsIO[{self.__class__.__name__}|{self.dtype.__name__}|file:{self.fileName}]<{hex(id(self))}>"
133 def __repr__(self):
134 return self.__str__()
136 @classmethod
137 def fromFile(cls, fileName):
138 """
139 Read a file storing fields, and return the `FieldsIO` of the appropriate
140 field type (structure).
142 Parameters
143 ----------
144 fileName : str
145 Name of the binary file.
147 Returns
148 -------
149 fieldsIO : :class:`FieldsIO`
150 The specialized `FieldsIO` adapted to the file.
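
Example
-------
A minimal sketch, assuming "file.pysdc" was already written (cf. the module example):

>>> fIn = FieldsIO.fromFile("file.pysdc")
>>> print(fIn)                  # specialized handler, e.g. Rectilinear
>>> print(fIn.header["nVar"])   # header is already read and set
>>> tEnd, uEnd = fIn.readField(-1)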
151 """
152 assert os.path.isfile(fileName), f"not a file ({fileName})"
153 with open(fileName, "rb") as f:
154 STRUCT, DTYPE = np.fromfile(f, dtype=H_DTYPE, count=2)
155 fieldsIO: FieldsIO = cls.STRUCTS[STRUCT](DTYPES[DTYPE], fileName)
156 fieldsIO.readHeader(f)
157 fieldsIO.initialized = True
158 return fieldsIO
160 @property
161 def hBase(self) -> np.ndarray:
162 """Base header into numpy array format"""
163 return np.array([self.sID, DTYPES_AVAIL[self.dtype]], dtype=H_DTYPE)
165 @classmethod
166 def register(cls, sID):
167 """
168 Decorator used to register a new FieldsIO specialized class
170 Parameters
171 ----------
172 sID : int
173 Unique identifier for the file structure, used in the binary file.
174 Since it's encoded as a 1-byte (8-bit) signed integer,
175 it must be between -128 and 127
177 Example
178 -------
179 >>> # New specialized FieldsIO class
180 >>> @FieldsIO.register(sID=31)
181 >>> class HexaMesh2D(FieldsIO):
182 >>> pass # ... implementation
183 """
185 def wrapper(registered: Type[T]) -> Type[T]:
186 assert (
187 sID not in cls.STRUCTS
188 ), f"struct ID already taken by {cls.STRUCTS[sID]}, cannot use it for {registered}"
189 cls.STRUCTS[sID] = registered
190 registered.sID = sID
191 return registered
193 return wrapper
195 def initialize(self):
196 """Initialize the file handler : create the file with header, removing any existing file with the same name"""
197 assert self.header is not None, "header must be set before initializing FieldsIO"
198 assert not self.initialized, "FieldsIO already initialized"
200 if not self.ALLOW_OVERWRITE:
201 if os.path.isfile(self.fileName):
202 raise FileExistsError(
203 f"file {self.fileName!r} already exists, use FieldsIO.ALLOW_OVERWRITE = True to allow overwriting"
204 )
206 with open(self.fileName, "w+b") as f:
207 self.hBase.tofile(f)
208 for array in self.hInfos:
209 array.tofile(f)
210 self.initialized = True
212 def setHeader(self, **params):
213 """(Abstract) Set the header before creating a new file to store the fields"""
214 raise NotImplementedError()
216 @property
217 def hInfos(self) -> list[np.ndarray]:
218 """(Abstract) Array representing the grid structure to be written in the binary file."""
219 raise NotImplementedError()
221 def readHeader(self, f):
222 """
223 (Abstract) Read the header from the file storing the fields.
225 Parameters
226 ----------
227 f : `_io.BufferedReader`
228 File to read the header from.
229 """
230 raise NotImplementedError()
232 @property
233 def hSize(self):
234 """Size of the full header (in bytes)"""
235 return self.hBase.nbytes + sum(hInfo.nbytes for hInfo in self.hInfos)
237 @property
238 def itemSize(self):
239 """Size of one field value (in bytes)"""
240 return self.dtype().itemsize
242 @property
243 def fSize(self):
244 """Full size of a field (in bytes)"""
245 return self.nItems * self.itemSize
247 @property
248 def fileSize(self):
249 """Current size of the file (in bytes)"""
250 return os.path.getsize(self.fileName)
252 def addField(self, time, field):
253 """
254 Append one field solution at the end of the file with one given time.
256 Parameters
257 ----------
258 time : float-like
259 The associated time of the field solution.
260 field : np.ndarray
261 The field values.
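
Example
-------
A minimal sketch, assuming fOut was set up and initialized as in the module example
(the zero field below is only illustrative):

>>> u = np.zeros(fOut.nItems, dtype=fOut.dtype)
>>> fOut.addField(0.0, u)  # dtype and number of values must match the header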
262 """
263 assert self.initialized, "cannot add field to a non initialized FieldsIO"
264 field = np.asarray(field)
265 assert field.dtype == self.dtype, f"expected {self.dtype} dtype, got {field.dtype}"
266 assert field.size == self.nItems, f"expected {self.nItems} values, got {field.size}"
267 with open(self.fileName, "ab") as f:
268 np.array(time, dtype=T_DTYPE).tofile(f)
269 field.tofile(f)
271 @property
272 def nFields(self):
273 """Number of fields currently stored in the binary file"""
274 return int((self.fileSize - self.hSize) // (self.tSize + self.fSize))
276 def formatIndex(self, idx):
277 """Utility method to format a fields index to a positional integer (negative starts from last field index, like python lists)"""
278 nFields = self.nFields
279 if idx < 0:
280 idx = nFields + idx
281 assert idx < nFields, f"cannot read index {idx} from {nFields} fields"
282 assert idx >= 0, f"cannot read index {idx-nFields} from {nFields} fields"
283 return idx
285 @property
286 def times(self):
287 """Vector of all times stored in the binary file"""
288 times = []
289 with open(self.fileName, "rb") as f:
290 f.seek(self.hSize)
291 for i in range(self.nFields):
292 t = np.fromfile(f, dtype=T_DTYPE, count=1, offset=0 if i == 0 else self.fSize)[0]
293 times.append(float(t))
294 return times
296 def time(self, idx):
297 """Time stored at a given field index"""
298 idx = self.formatIndex(idx)
299 offset = self.hSize + idx * (self.tSize + self.fSize)
300 with open(self.fileName, "rb") as f:
301 t = np.fromfile(f, dtype=T_DTYPE, count=1, offset=offset)[0]
302 return float(t)
304 def readField(self, idx):
305 """
306 Read one field stored in the binary file, corresponding to the given
307 time index.
309 Parameters
310 ----------
311 idx : int
312 Positional index of the field.
314 Returns
315 -------
316 t : float
317 Stored time for this field.
318 field : np.ndarray
319 Read fields in a numpy array.
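
Example
-------
A minimal sketch, assuming fIn was obtained with FieldsIO.fromFile (cf. the module example):

>>> for idx in range(fIn.nFields):
>>>     t, u = fIn.readField(idx)  # for Rectilinear, u has shape (nVar, *gridSizes)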
320 """
321 idx = self.formatIndex(idx)
322 offset = self.hSize + idx * (self.tSize + self.fSize)
323 with open(self.fileName, "rb") as f:
324 f.seek(offset)
325 t = float(np.fromfile(f, dtype=T_DTYPE, count=1)[0])
326 field = np.fromfile(f, dtype=self.dtype, count=self.nItems)
327 self.reshape(field)
328 return t, field
330 def reshape(self, field):
331 """Eventually reshape the field to correspond to the grid structure"""
332 pass
335@FieldsIO.register(sID=0)
336class Scalar(FieldsIO):
337 """FieldsIO handler storing a given number of scalar"""
339 # -------------------------------------------------------------------------
340 # Overridden methods
341 # -------------------------------------------------------------------------
342 def setHeader(self, nVar):
343 """
344 Set the descriptive grid structure to be stored in the file header.
346 Parameters
347 ----------
348 nVar : int
349 Number of scalar variable stored.
350 """
351 self.header = {"nVar": int(nVar)}
352 self.nItems = self.nVar
354 @property
355 def hInfos(self):
356 """Array representing the grid structure to be written in the binary file."""
357 return [np.array([self.nVar], dtype=np.int64)]
359 def readHeader(self, f):
360 """
361 Read the header from the binary file storing the fields.
363 Parameters
364 ----------
365 f : `_io.BufferedReader`
366 File to read the header from.
367 """
368 (nVar,) = np.fromfile(f, dtype=np.int64, count=1)
369 self.setHeader(nVar)
371 # -------------------------------------------------------------------------
372 # Class specifics
373 # -------------------------------------------------------------------------
374 @property
375 def nVar(self):
376 """Number of variables in a fields, as described in the header"""
377 return self.header["nVar"]
380@FieldsIO.register(sID=1)
381class Rectilinear(Scalar):
382 """FieldsIO handler storing a given number of scalar variables on a N-dimensional rectilinear grid"""
384 @staticmethod
385 def setupCoords(*coords):
386 """Utility function to setup grids in multiple dimensions, given the keyword arguments"""
387 coords = [np.asarray(coord, dtype=np.float64) for coord in coords]
388 for axis, coord in enumerate(coords):
389 assert coord.ndim == 1, f"coord for {axis=} must be one dimensional"
390 return coords
392 # -------------------------------------------------------------------------
393 # Overridden methods
394 # -------------------------------------------------------------------------
395 def setHeader(self, nVar, coords):
396 """
397 Set the descriptive grid structure to be stored in the file header.
399 Parameters
400 ----------
401 nVar : int
402 Number of variables stored.
403 coords : np.1darray or list[np.1darray]
404 The grid coordinates in each dimension.
406 Note
407 ----
408 When used in MPI decomposition mode, all coordinates **must** describe the global grid.
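
Example
-------
A minimal sketch for a 3D grid (coordinate sizes chosen arbitrarily):

>>> import numpy as np
>>> x, y, z = np.linspace(0, 1, 32), np.linspace(0, 1, 64), np.linspace(0, 1, 16)
>>> fOut = Rectilinear(np.float64, "file3D.pysdc")
>>> fOut.setHeader(nVar=3, coords=[x, y, z])
>>> fOut.nItems  # nVar * nX * nY * nZ = 3 * 32 * 64 * 16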
409 """
410 if not isinstance(coords, (tuple, list)):
411 coords = [coords]
412 coords = self.setupCoords(*coords)
413 self.header = {"nVar": int(nVar), "coords": coords}
414 self.nItems = nVar * self.nDoF
416 @property
417 def hInfos(self):
418 """Array representing the grid structure to be written in the binary file."""
419 return [np.array([self.nVar, self.dim, *self.gridSizes], dtype=np.int32)] + [
420 np.array(coord, dtype=np.float64) for coord in self.header["coords"]
421 ]
423 def readHeader(self, f):
424 """
425 Read the header from the binary file storing the fields.
427 Parameters
428 ----------
429 f : `_io.BufferedReader`
430 File to read the header from.
431 """
432 nVar, dim = np.fromfile(f, dtype=np.int32, count=2)
433 gridSizes = np.fromfile(f, dtype=np.int32, count=dim)
434 coords = [np.fromfile(f, dtype=np.float64, count=n) for n in gridSizes]
435 self.setHeader(nVar, coords)
437 def reshape(self, fields: np.ndarray):
438 """Reshape the fields to a N-d array (inplace operation)"""
439 fields.shape = (self.nVar, *self.gridSizes)
441 # -------------------------------------------------------------------------
442 # Class specifics
443 # -------------------------------------------------------------------------
444 @property
445 def gridSizes(self):
446 """Number of points in y direction"""
447 return [coord.size for coord in self.header["coords"]]
449 @property
450 def dim(self):
451 """Number of grid dimensions"""
452 return len(self.gridSizes)
454 @property
455 def nDoF(self):
456 """Number of degrees of freedom for one variable"""
457 return np.prod(self.gridSizes)
459 def toVTR(self, baseName, varNames, idxFormat="{:06d}"):
460 """
461 Convert all 3D fields stored in binary format (FieldsIO) into a list
462 of VTR files, which can be read later with ParaView or equivalent tools
463 to make videos.
465 Parameters
466 ----------
467 baseName : str
468 Base name of the VTR file.
469 varNames : list[str]
470 Variable names of the fields.
471 idxFormat : str, optional
472 Formatting string for the index of the VTR file.
473 The default is "{:06d}".
475 Example
476 -------
477 >>> # Suppose the FieldsIO object is already written into outputs.pysdc
478 >>> import os
479 >>> from pySDC.helpers.fieldsIO import Rectilinear
480 >>> os.makedirs("vtrFiles") # to store all VTR files into a subfolder
481 >>> Rectilinear.fromFile("outputs.pysdc").toVTR(
482 >>> baseName="vtrFiles/field", varNames=["u", "v", "w", "T", "p"])
483 """
484 assert self.dim == 3, "can only be used with 3D fields"
485 from pySDC.helpers.vtkIO import writeToVTR
487 template = f"{baseName}_{idxFormat}"
488 for i in range(self.nFields):
489 _, u = self.readField(i)
490 writeToVTR(template.format(i), u, self.header["coords"], varNames)
492 # -------------------------------------------------------------------------
493 # MPI-parallel implementation
494 # -------------------------------------------------------------------------
495 comm: MPI.Intracomm = None
496 _nCollectiveIO = None
498 @classmethod
499 def setupMPI(cls, comm: MPI.Intracomm, iLoc, nLoc):
500 """
501 Set up the MPI mode for the file IO, considering a Cartesian block decomposition
502 where each 1D grid coordinate is split into contiguous subintervals.
504 Parameters
505 ----------
506 comm : MPI.Intracomm
507 The space decomposition communicator.
508 iLoc : list[int]
509 Starting index of the local sub-domain in the global coordinates.
510 nLoc : list[int]
511 Number of points in the local sub-domain.
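
Example
-------
A minimal sketch (illustrative numbers): a 2 x 2 process grid on a global 128 x 64
grid gives local blocks of size 64 x 32; the process owning the second block in each
direction would call

>>> Rectilinear.setupMPI(comm, iLoc=[64, 32], nLoc=[64, 32])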
512 """
513 cls.comm = comm
514 cls.iLoc = iLoc
515 cls.nLoc = nLoc
516 cls.mpiFile = None
517 cls._nCollectiveIO = None
519 @property
520 def nCollectiveIO(self):
521 """
522 Number of collective IO operations over all processes, when reading or writing a field.
524 Returns
525 -------
526 int : Number of collective IO accesses, i.e. the maximum over all processes of the nVar * prod(nLoc[:-1]) contiguous chunks each process writes or reads per field.
527 """
528 if self._nCollectiveIO is None:
529 self._nCollectiveIO = self.comm.allreduce(self.nVar * np.prod(self.nLoc[:-1]), op=MPI.MAX)
530 return self._nCollectiveIO
532 @property
533 def MPI_ON(self):
534 """Wether or not MPI is activated"""
535 if self.comm is None:
536 return False
537 return self.comm.Get_size() > 1
539 @property
540 def MPI_ROOT(self):
541 """Wether or not the process is MPI Root"""
542 if self.comm is None:
543 return True
544 return self.comm.Get_rank() == 0
546 def MPI_FILE_OPEN(self, mode):
547 """Open the binary file in MPI mode"""
548 amode = {
549 "r": MPI.MODE_RDONLY,
550 "a": MPI.MODE_WRONLY | MPI.MODE_APPEND,
551 }[mode]
552 self.mpiFile = MPI.File.Open(self.comm, self.fileName, amode)
554 def MPI_WRITE(self, data):
555 """Write data (np.ndarray) in the binary file in MPI mode, at the current file cursor position."""
556 self.mpiFile.Write(data)
558 def MPI_WRITE_AT_ALL(self, offset, data: np.ndarray):
559 """
560 Write data in the binary file in MPI mode, with a given offset
561 **relative to the beginning of the file**.
563 Parameters
564 ----------
565 offset : int
566 Offset to write at, relative to the beginning of the file, in bytes.
567 data : np.ndarray
568 Data to be written in the binary file.
569 """
570 self.mpiFile.Write_at_all(offset, data)
572 def MPI_READ_AT_ALL(self, offset, data: np.ndarray):
573 """
574 Read data from the binary file in MPI mode, with a given offset
575 **relative to the beginning of the file**.
577 Parameters
578 ----------
579 offset : int
580 Offset to read at, relative to the beginning of the file, in bytes.
581 data : np.ndarray
582 Array on which to read the data from the binary file.
583 """
584 self.mpiFile.Read_at_all(offset, data)
586 def MPI_FILE_CLOSE(self):
587 """Close the binary file in MPI mode"""
588 self.mpiFile.Close()
589 self.mpiFile = None
591 def initialize(self):
592 """Initialize the binary file (write header) in MPI mode"""
593 if self.MPI_ROOT:
594 try:
595 super().initialize()
596 except AssertionError as e:
597 if self.MPI_ON:
598 print(f"{type(e)}: {e}")
599 self.comm.Abort()
600 else:
601 raise e
603 if self.MPI_ON:
604 self.comm.Barrier() # Important, should not be removed!
605 self.initialized = True
607 def addField(self, time, field):
608 """
609 Append one field solution at the end of the file with one given time,
610 possibly using MPI.
612 Parameters
613 ----------
614 time : float-like
615 The associated time of the field solution.
616 field : np.ndarray
617 The (local) field values.
619 Note
620 ----
621 If an MPI decomposition is used, field **must be** the local field values.
622 """
623 if not self.MPI_ON:
624 return super().addField(time, field)
626 assert self.initialized, "cannot add field to a non initialized FieldsIO"
628 field = np.asarray(field)
629 assert field.dtype == self.dtype, f"expected {self.dtype} dtype, got {field.dtype}"
630 assert field.shape == (
631 self.nVar,
632 *self.nLoc,
633 ), f"expected {(self.nVar, *self.nLoc)} shape, got {field.shape}"
635 offset0 = self.fileSize
636 self.MPI_FILE_OPEN(mode="a")
637 nWrites = 0
638 nCollectiveIO = self.nCollectiveIO
640 if self.MPI_ROOT:
641 self.MPI_WRITE(np.array(time, dtype=T_DTYPE))
642 offset0 += self.tSize
644 for (iVar, *iBeg) in itertools.product(range(self.nVar), *[range(n) for n in self.nLoc[:-1]]):
645 offset = offset0 + self.iPos(iVar, iBeg) * self.itemSize
646 self.MPI_WRITE_AT_ALL(offset, field[(iVar, *iBeg)])
647 nWrites += 1
649 for _ in range(nCollectiveIO - nWrites):
650 # Additional collective write to catch up with other processes
651 self.MPI_WRITE_AT_ALL(offset0, field[:0])
653 self.MPI_FILE_CLOSE()
655 def iPos(self, iVar, iX):  # global (flattened) item index of the chunk starting at (iVar, *iX), used to compute MPI IO byte offsets
656 iPos = iVar * self.nDoF
657 for axis in range(self.dim - 1):
658 iPos += (self.iLoc[axis] + iX[axis]) * np.prod(self.gridSizes[axis + 1 :])
659 iPos += self.iLoc[-1]
660 return iPos
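
# Worked example for iPos (hypothetical numbers): with gridSizes=[16, 32] (nDoF=512),
# a process owning iLoc=[8, 16], nLoc=[8, 16] and the chunk (iVar=1, iX=[0]) gets
#   iPos = 1*512 + (8+0)*32 + 16 = 784,
# i.e. its contiguous chunk of nLoc[-1]=16 values starts at item offset 784 in the field.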
662 def readField(self, idx):
663 """
664 Read one field stored in the binary file, corresponding to the given
665 time index, using MPI collective IO if a space parallel decomposition is used.
667 Parameters
668 ----------
669 idx : int
670 Positional index of the field.
672 Returns
673 -------
674 t : float
675 Stored time for this field.
676 field : np.ndarray
677 Read (local) fields in a numpy array.
679 Note
680 ----
681 If an MPI decomposition is used, it reads and returns the local field values only.
682 """
683 if not self.MPI_ON:
684 return super().readField(idx)
686 idx = self.formatIndex(idx)
687 offset0 = self.hSize + idx * (self.tSize + self.fSize)
688 with open(self.fileName, "rb") as f:
689 t = float(np.fromfile(f, dtype=T_DTYPE, count=1, offset=offset0)[0])
690 offset0 += self.tSize
692 field = np.empty((self.nVar, *self.nLoc), dtype=self.dtype)
694 self.MPI_FILE_OPEN(mode="r")
695 nReads = 0
696 nCollectiveIO = self.nCollectiveIO
698 for (iVar, *iBeg) in itertools.product(range(self.nVar), *[range(n) for n in self.nLoc[:-1]]):
699 offset = offset0 + self.iPos(iVar, iBeg) * self.itemSize
700 self.MPI_READ_AT_ALL(offset, field[(iVar, *iBeg)])
701 nReads += 1
703 for _ in range(nCollectiveIO - nReads):
704 # Additional collective read to catch up with other processes
705 self.MPI_READ_AT_ALL(offset0, field[:0])
707 self.MPI_FILE_CLOSE()
709 return t, field
712# -----------------------------------------------------------------------------------------------
713# Utility functions used for testing
714# -----------------------------------------------------------------------------------------------
715 def initGrid(nVar, gridSizes):  # build test coordinates in [0, 1) and a separable reference field u0 of shape (nVar, *gridSizes)
716 dim = len(gridSizes)
717 coords = [np.linspace(0, 1, num=n, endpoint=False) for n in gridSizes]
718 s = [None] * dim
719 u0 = np.array(np.arange(nVar) + 1)[(slice(None), *s)]
720 for x in np.meshgrid(*coords, indexing="ij"):
721 u0 = u0 * x
722 return coords, u0
725def writeFields_MPI(fileName, dtypeIdx, algo, nSteps, nVar, gridSizes):
726 coords, u0 = initGrid(nVar, gridSizes)
728 from mpi4py import MPI
729 from pySDC.helpers.blocks import BlockDecomposition
730 from pySDC.helpers.fieldsIO import Rectilinear
732 comm = MPI.COMM_WORLD
733 MPI_SIZE = comm.Get_size()
734 MPI_RANK = comm.Get_rank()
736 blocks = BlockDecomposition(MPI_SIZE, gridSizes, algo, MPI_RANK)
738 iLoc, nLoc = blocks.localBounds
739 Rectilinear.setupMPI(comm, iLoc, nLoc)
740 s = [slice(i, i + n) for i, n in zip(iLoc, nLoc)]
741 u0 = u0[(slice(None), *s)]
743 f1 = Rectilinear(DTYPES[dtypeIdx], fileName)
744 f1.setHeader(nVar=nVar, coords=coords)
746 u0 = np.asarray(u0, dtype=f1.dtype)
747 f1.initialize()
749 times = np.arange(nSteps) / nSteps
750 for t in times:
751 ut = (u0 * t).astype(f1.dtype)
752 f1.addField(t, ut)
754 return u0
757def compareFields_MPI(fileName, u0, nSteps):
758 from pySDC.helpers.fieldsIO import FieldsIO
760 comm = MPI.COMM_WORLD
761 MPI_RANK = comm.Get_rank()
762 if MPI_RANK == 0:
763 print("Comparing fields with MPI")
765 f2 = FieldsIO.fromFile(fileName)
767 times = np.arange(nSteps) / nSteps
768 for idx, t in enumerate(times):
769 u1 = u0 * t
770 t2, u2 = f2.readField(idx)
771 assert t2 == t, f"fields[{idx}] in {f2} has incorrect time ({t2} instead of {t})"
772 assert u2.shape == u1.shape, f"{idx}'s fields in {f2} has incorrect shape"
773 assert np.allclose(u2, u1), f"{idx}'s fields in {f2} has incorrect values"
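
# A minimal sketch of how the two testing utilities above could be driven, e.g. from a
# small script launched with `mpiexec -n 4 python thisScript.py` (file name, dtype index
# and decomposition algorithm below are illustrative assumptions):
#
#     from pySDC.helpers.fieldsIO import writeFields_MPI, compareFields_MPI
#
#     fileName = "testFields.pysdc"
#     nSteps, nVar, gridSizes = 3, 2, [64, 32]
#     algo = "Hybrid"  # assumed to be an algorithm name accepted by pySDC.helpers.blocks.BlockDecomposition
#
#     u0 = writeFields_MPI(fileName, dtypeIdx=0, algo=algo, nSteps=nSteps, nVar=nVar, gridSizes=gridSizes)
#     compareFields_MPI(fileName, u0, nSteps)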