PFASST++
vector_impl.hpp
#include <algorithm>
#include <cassert>

#include "pfasst/encap/vector.hpp"

namespace pfasst
{
  namespace encap
  {
    template<typename scalar, typename time>
    VectorEncapsulation<scalar, time>::VectorEncapsulation(const size_t size)
      : vector<scalar>(size)
    {
      zero();
    }

    template<typename scalar, typename time>
    VectorEncapsulation<scalar, time>::VectorEncapsulation(const VectorEncapsulation<scalar, time>& other)
      : vector<scalar>(other)
    {}

    template<typename scalar, typename time>
    VectorEncapsulation<scalar, time>::VectorEncapsulation(const Encapsulation<time>& other)
      : VectorEncapsulation(dynamic_cast<const VectorEncapsulation<scalar, time>&>(other))
    {}

    template<typename scalar, typename time>
    VectorEncapsulation<scalar, time>::VectorEncapsulation(VectorEncapsulation<scalar, time>&& other)
      : vector<scalar>(other)
    {}

    template<typename scalar, typename time>
    VectorEncapsulation<scalar, time>::VectorEncapsulation(Encapsulation<time>&& other)
      : VectorEncapsulation(dynamic_cast<VectorEncapsulation<scalar, time>&&>(other))
    {}

    template<typename scalar, typename time>
    VectorEncapsulation<scalar, time>::~VectorEncapsulation()
    {
#ifdef WITH_MPI
      // wait for any outstanding non-blocking send before the buffer is destroyed
      if (this->send_request != MPI_REQUEST_NULL) {
        MPI_Status stat = MPI_Status_factory();
        ML_CLOG(DEBUG, "Encap", "waiting for open send request");
        int err = MPI_Wait(&(this->send_request), &stat);
        check_mpi_error(err);
        ML_CLOG(DEBUG, "Encap", "waited for open send request");
      }
      assert(this->recv_request == MPI_REQUEST_NULL);
      assert(this->send_request == MPI_REQUEST_NULL);
#endif
    }

    template<typename scalar, typename time>
    void VectorEncapsulation<scalar, time>::zero()
    {
      this->assign(this->size(), scalar(0.0));
    }

    template<typename scalar, typename time>
    void VectorEncapsulation<scalar, time>::copy(shared_ptr<const Encapsulation<time>> x)
    {
      shared_ptr<const VectorEncapsulation<scalar, time>> x_cast =
        dynamic_pointer_cast<const VectorEncapsulation<scalar, time>>(x);
      assert(x_cast);
      this->copy(x_cast);
    }

    template<typename scalar, typename time>
    void VectorEncapsulation<scalar, time>::copy(shared_ptr<const VectorEncapsulation<scalar, time>> x)
    {
      std::copy(x->cbegin(), x->cend(), this->begin());
    }

    template<typename scalar, typename time>
    void VectorEncapsulation<scalar, time>::saxpy(time a, shared_ptr<const Encapsulation<time>> x)
    {
      shared_ptr<const VectorEncapsulation<scalar, time>> x_cast =
        dynamic_pointer_cast<const VectorEncapsulation<scalar, time>>(x);
      assert(x_cast);

      this->saxpy(a, x_cast);
    }

    template<typename scalar, typename time>
    void VectorEncapsulation<scalar, time>::saxpy(time a, shared_ptr<const VectorEncapsulation<scalar, time>> x)
    {
      // y += a * x, element by element
      assert(this->size() == x->size());
      for (size_t i = 0; i < this->size(); i++)
      { this->data()[i] += a * x->data()[i]; }
    }

    template<typename scalar, typename time>
    void
    VectorEncapsulation<scalar, time>::mat_apply(vector<shared_ptr<Encapsulation<time>>> dst,
                                                 time a, Matrix<time> mat,
                                                 vector<shared_ptr<Encapsulation<time>>> src,
                                                 bool zero)
    {
      size_t ndst = dst.size();
      size_t nsrc = src.size();

      // downcast all encapsulations, then delegate to the typed overload below
      vector<shared_ptr<VectorEncapsulation<scalar, time>>> dst_cast(ndst), src_cast(nsrc);
      for (size_t n = 0; n < ndst; n++) {
        dst_cast[n] = dynamic_pointer_cast<VectorEncapsulation<scalar, time>>(dst[n]);
        assert(dst_cast[n]);
      }
      for (size_t m = 0; m < nsrc; m++) {
        src_cast[m] = dynamic_pointer_cast<VectorEncapsulation<scalar, time>>(src[m]);
        assert(src_cast[m]);
      }

      dst_cast[0]->mat_apply(dst_cast, a, mat, src_cast, zero);
    }

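    // In short, the typed overload below computes, for every degree of freedom i,
    //   dst[n][i] += a * sum_m mat(n, m) * src[m][i]
    // for all destination indices n, i.e. each destination vector accumulates a scaled
    // linear combination of the source vectors with coefficients taken from row n of mat,
    // after optionally zeroing the destinations first.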
    template<typename scalar, typename time>
    void
    VectorEncapsulation<scalar, time>::mat_apply(vector<shared_ptr<VectorEncapsulation<scalar, time>>> dst,
                                                 time a, Matrix<time> mat,
                                                 vector<shared_ptr<VectorEncapsulation<scalar, time>>> src,
                                                 bool zero)
    {
      size_t ndst = dst.size();
      size_t nsrc = src.size();

      if (zero) { for (auto elem : dst) { elem->zero(); } }

      size_t ndofs = dst[0]->size();
      for (size_t i = 0; i < ndofs; i++) {
        for (size_t n = 0; n < ndst; n++) {
          assert(dst[n]->size() == ndofs);
          for (size_t m = 0; m < nsrc; m++) {
            assert(src[m]->size() == ndofs);
            dst[n]->data()[i] += a * mat(n, m) * src[m]->data()[i];
          }
        }
      }
    }

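    // norm0() below returns the maximum norm (largest absolute value) of the entries;
    // for example, a vector holding {1.0, -3.0, 2.0} would yield 3.0.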
    template<typename scalar, typename time>
    time VectorEncapsulation<scalar, time>::norm0() const
    {
      return std::abs(*std::max_element(this->cbegin(), this->cend(),
                                        [](scalar a, scalar b) { return std::abs(a) < std::abs(b); }));
    }


    template<typename scalar, typename time>
    VectorFactory<scalar, time>::VectorFactory(const size_t size)
      : size(size)
    {}

    template<typename scalar, typename time>
    size_t VectorFactory<scalar, time>::dofs() const
    {
      return size;
    }

    template<typename scalar, typename time>
    shared_ptr<Encapsulation<time>> VectorFactory<scalar, time>::create(const EncapType)
    {
      return make_shared<VectorEncapsulation<scalar, time>>(this->dofs());
    }


    template<typename scalar, typename time>
    VectorEncapsulation<scalar, time>& as_vector(shared_ptr<Encapsulation<time>> x)
    {
      typedef VectorEncapsulation<scalar, time> VectorT;
      shared_ptr<VectorT> y = dynamic_pointer_cast<VectorT>(x);
      assert(y);
      return *y.get();
    }

    template<typename scalar, typename time>
    const VectorEncapsulation<scalar, time>& as_vector(shared_ptr<const Encapsulation<time>> x)
    {
      typedef VectorEncapsulation<scalar, time> VectorT;
      shared_ptr<const VectorT> y = dynamic_pointer_cast<const VectorT>(x);
      assert(y);
      return *y.get();
    }

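    // The as_vector() helpers above give checked, typed access to the underlying
    // std::vector interface of a generic Encapsulation; the dynamic_pointer_cast plus
    // assert guards against being handed a non-vector encapsulation. A usage sketch
    // (the double/double template arguments are only an illustrative assumption):
    //
    //   shared_ptr<Encapsulation<double>> q = make_shared<VectorEncapsulation<double, double>>(8);
    //   as_vector<double, double>(q)[0] = 1.0;   // mutable element access
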
#ifdef WITH_MPI
    template<typename scalar, typename time>
    void VectorEncapsulation<scalar, time>::post(ICommunicator* comm, int tag)
    {
      auto& mpi = as_mpi(comm);
      if (mpi.size() == 1) { return; }
      if (mpi.rank() == 0) { return; }

      if (this->recv_request != MPI_REQUEST_NULL) {
        throw MPIError("a previous receive request is still open");
      }

      int src = (mpi.rank() - 1) % mpi.size();
      ML_CLOG(DEBUG, "Encap", "posting non-blocking receive from rank " << src << " with tag=" << tag);
      int err = MPI_Irecv(this->data(), sizeof(scalar) * this->size(), MPI_CHAR,
                          src, tag, mpi.comm, &this->recv_request);
      check_mpi_error(err);
      ML_CLOG(DEBUG, "Encap", "posted non-blocking receive from rank " << src << " with tag=" << tag);
    }

    template<typename scalar, typename time>
    void VectorEncapsulation<scalar, time>::recv(ICommunicator* comm, int tag, bool blocking)
    {
      auto& mpi = as_mpi(comm);
      if (mpi.size() == 1) { return; }
      if (mpi.rank() == 0) { return; }

      MPI_Status stat = MPI_Status_factory();
      int err = MPI_SUCCESS;

      if (blocking) {
        int src = (mpi.rank() - 1) % mpi.size();
        ML_CLOG(DEBUG, "Encap", "blocking receive from rank " << src << " with tag=" << tag);
        err = MPI_Recv(this->data(), sizeof(scalar) * this->size(), MPI_CHAR,
                       src, tag, mpi.comm, &stat);
        check_mpi_error(err);
        ML_CLOG(DEBUG, "Encap", "received blocking from rank " << src << " with tag=" << tag << ": " << stat);
      } else {
        // non-blocking: complete the receive that was posted via post()
        if (this->recv_request != MPI_REQUEST_NULL) {
          ML_CLOG(DEBUG, "Encap", "waiting on last receive request");
          err = MPI_Wait(&(this->recv_request), &stat);
          check_mpi_error(err);
          ML_CLOG(DEBUG, "Encap", "waited on last receive request: " << stat);
        }
      }
    }

    template<typename scalar, typename time>
    void VectorEncapsulation<scalar, time>::send(ICommunicator* comm, int tag, bool blocking)
    {
      auto& mpi = as_mpi(comm);
      if (mpi.size() == 1) { return; }
      if (mpi.rank() == mpi.size() - 1) { return; }

      MPI_Status stat = MPI_Status_factory();
      int err = MPI_SUCCESS;
      int dest = (mpi.rank() + 1) % mpi.size();

      if (blocking) {
        ML_CLOG(DEBUG, "Encap", "blocking send to rank " << dest << " with tag=" << tag);
        err = MPI_Send(this->data(), sizeof(scalar) * this->size(), MPI_CHAR, dest, tag, mpi.comm);
        check_mpi_error(err);
        ML_CLOG(DEBUG, "Encap", "sent blocking to rank " << dest << " with tag=" << tag);
      } else {
        // note: this branch has never been reached in practice;
        // wait for the previous non-blocking send to finish before reusing the request
        ML_CLOG(DEBUG, "Encap", "waiting on last send request to finish");
        err = MPI_Wait(&(this->send_request), &stat);
        check_mpi_error(err);
        ML_CLOG(DEBUG, "Encap", "waited on last send request: " << stat);
        ML_CLOG(DEBUG, "Encap", "non-blocking sending to rank " << dest << " with tag=" << tag);
        err = MPI_Isend(this->data(), sizeof(scalar) * this->size(), MPI_CHAR,
                        dest, tag, mpi.comm, &(this->send_request));
        check_mpi_error(err);
        ML_CLOG(DEBUG, "Encap", "sent non-blocking to rank " << dest << " with tag=" << tag);
      }
    }

    template<typename scalar, typename time>
    void VectorEncapsulation<scalar, time>::broadcast(ICommunicator* comm)
    {
      auto& mpi = as_mpi(comm);
      ML_CLOG(DEBUG, "Encap", "broadcasting");
      int err = MPI_Bcast(this->data(), sizeof(scalar) * this->size(), MPI_CHAR,
                          comm->size() - 1, mpi.comm);
      check_mpi_error(err);
      ML_CLOG(DEBUG, "Encap", "broadcasted");
    }
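
    // Taken together, post()/recv()/send() above exchange the raw bytes of the vector
    // (sizeof(scalar) * size() bytes, transferred as MPI_CHAR) between neighbouring ranks:
    // every rank receives from rank - 1 and sends to rank + 1, rank 0 never receives,
    // the last rank never sends, and broadcast() is rooted at the last rank.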
#endif

  }  // ::pfasst::encap
}  // ::pfasst
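
A minimal usage sketch (illustrative only, not part of vector_impl.hpp; it assumes the declarations from the accompanying vector header are available and picks double for both the scalar and time template parameters):

#include <memory>

using pfasst::encap::Encapsulation;
using pfasst::encap::VectorEncapsulation;
using pfasst::encap::as_vector;

int main()
{
  // two encapsulated vectors with four degrees of freedom each, zero-initialized by the constructor
  std::shared_ptr<Encapsulation<double>> x = std::make_shared<VectorEncapsulation<double, double>>(4);
  std::shared_ptr<Encapsulation<double>> y = std::make_shared<VectorEncapsulation<double, double>>(4);

  as_vector<double, double>(x)[0] = 2.0;   // element access via the helper
  y->saxpy(0.5, x);                        // y += 0.5 * x
  double m = y->norm0();                   // maximum norm, here 0.5 * 2.0 = 1.0
  return m > 0.0 ? 0 : 1;
}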