template<typename scalar, typename time>
VectorEncapsulation<scalar, time>::VectorEncapsulation(const size_t size)
  : vector<scalar>(size)
{
  zero();
}

template<typename scalar, typename time>
VectorEncapsulation<scalar, time>::VectorEncapsulation(const VectorEncapsulation<scalar, time>& other)
  : vector<scalar>(other)
{}

template<typename scalar, typename time>
VectorEncapsulation<scalar, time>::VectorEncapsulation(const Encapsulation<time>& other)
  : VectorEncapsulation(dynamic_cast<const VectorEncapsulation<scalar, time>&>(other))
{}

template<typename scalar, typename time>
VectorEncapsulation<scalar, time>::VectorEncapsulation(VectorEncapsulation<scalar, time>&& other)
  : vector<scalar>(other)
{}

template<typename scalar, typename time>
VectorEncapsulation<scalar, time>::VectorEncapsulation(Encapsulation<time>&& other)
  : VectorEncapsulation(dynamic_cast<VectorEncapsulation<scalar, time>&&>(other))
{}

template<typename scalar, typename time>
VectorEncapsulation<scalar, time>::~VectorEncapsulation()
{
  if (this->send_request != MPI_REQUEST_NULL) {
    // complete any outstanding non-blocking send before the buffer is destroyed
    MPI_Status stat = MPI_Status_factory();
    ML_CLOG(DEBUG, "Encap", "waiting for open send request");
    int err = MPI_Wait(&(this->send_request), &stat);
    check_mpi_error(err);
    ML_CLOG(DEBUG, "Encap", "waited for open send request");
  }
  assert(this->recv_request == MPI_REQUEST_NULL);
  assert(this->send_request == MPI_REQUEST_NULL);
}

template<typename scalar, typename time>
void VectorEncapsulation<scalar, time>::zero()
{
  this->assign(this->size(), scalar(0.0));
}

template<typename scalar, typename time>
void VectorEncapsulation<scalar, time>::copy(shared_ptr<const Encapsulation<time>> x)
{
  shared_ptr<const VectorEncapsulation<scalar, time>> x_cast = \
    dynamic_pointer_cast<const VectorEncapsulation<scalar, time>>(x);
  assert(x_cast);
  this->copy(x_cast);
}

template<typename scalar, typename time>
void VectorEncapsulation<scalar, time>::copy(shared_ptr<const VectorEncapsulation<scalar, time>> x)
{
  std::copy(x->cbegin(), x->cend(), this->begin());
}

template<typename scalar, typename time>
void VectorEncapsulation<scalar, time>::saxpy(time a, shared_ptr<const Encapsulation<time>> x)
{
  shared_ptr<const VectorEncapsulation<scalar, time>> x_cast = \
    dynamic_pointer_cast<const VectorEncapsulation<scalar, time>>(x);
  assert(x_cast);
  this->saxpy(a, x_cast);
}

template<typename scalar, typename time>
void VectorEncapsulation<scalar, time>::saxpy(time a, shared_ptr<const VectorEncapsulation<scalar, time>> x)
{
  assert(this->size() == x->size());
  for (size_t i = 0; i < this->size(); i++)
  { this->data()[i] += a * x->data()[i]; }
}

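For illustration, a minimal usage sketch of zero(), copy() and saxpy() on two encapsulations. The header path pfasst/encap/vector.hpp and the main() scaffolding are assumptions made so the example is self-contained; they are not part of this listing.

#include <algorithm>
#include <memory>
#include <pfasst/encap/vector.hpp>   // assumed header path

int main()
{
  using Vec = pfasst::encap::VectorEncapsulation<double, double>;

  auto x = std::make_shared<Vec>(3);             // three degrees of freedom, zero-initialized
  auto y = std::make_shared<Vec>(3);

  std::fill(x->begin(), x->end(), 1.0);          // x = (1, 1, 1)
  std::shared_ptr<const Vec> x_const = x;        // pick the VectorEncapsulation overloads explicitly
  y->saxpy(0.5, x_const);                        // y <- y + 0.5 * x = (0.5, 0.5, 0.5)
  y->copy(x_const);                              // y <- x
  y->zero();                                     // y <- 0
  return 0;
}
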
template<typename scalar, typename time>
void VectorEncapsulation<scalar, time>::mat_apply(vector<shared_ptr<Encapsulation<time>>> dst,
                                                  time a, Matrix<time> mat,
                                                  vector<shared_ptr<Encapsulation<time>>> src,
                                                  bool zero)
{
  size_t ndst = dst.size();
  size_t nsrc = src.size();

  vector<shared_ptr<VectorEncapsulation<scalar, time>>> dst_cast(ndst), src_cast(nsrc);
  for (size_t n = 0; n < ndst; n++) {
    dst_cast[n] = dynamic_pointer_cast<VectorEncapsulation<scalar, time>>(dst[n]);
    assert(dst_cast[n]);
  }
  for (size_t m = 0; m < nsrc; m++) {
    src_cast[m] = dynamic_pointer_cast<VectorEncapsulation<scalar, time>>(src[m]);
    assert(src_cast[m]);
  }

  dst_cast[0]->mat_apply(dst_cast, a, mat, src_cast, zero);
}

template<typename scalar, typename time>
void VectorEncapsulation<scalar, time>::mat_apply(vector<shared_ptr<VectorEncapsulation<scalar, time>>> dst,
                                                  time a, Matrix<time> mat,
                                                  vector<shared_ptr<VectorEncapsulation<scalar, time>>> src,
                                                  bool zero)
{
  size_t ndst = dst.size();
  size_t nsrc = src.size();

  if (zero) { for (auto elem : dst) { elem->zero(); } }

  size_t ndofs = dst[0]->size();
  for (size_t i = 0; i < ndofs; i++) {
    for (size_t n = 0; n < ndst; n++) {
      assert(dst[n]->size() == ndofs);
      for (size_t m = 0; m < nsrc; m++) {
        assert(src[m]->size() == ndofs);
        dst[n]->data()[i] += a * mat(n, m) * src[m]->data()[i];
      }
    }
  }
}

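The nested loops above compute, for every degree of freedom i, dst[n][i] += a * sum_m mat(n, m) * src[m][i]. A minimal standalone sketch of the same arithmetic on plain nested std::vector data; the function and variable names here are illustrative only and not part of the library.

#include <cstddef>
#include <vector>

// dst[n][i] += a * sum_m mat[n][m] * src[m][i] for all n and all degrees of freedom i
void mat_apply_plain(std::vector<std::vector<double>>& dst, double a,
                     const std::vector<std::vector<double>>& mat,
                     const std::vector<std::vector<double>>& src)
{
  const std::size_t ndofs = dst.front().size();
  for (std::size_t i = 0; i < ndofs; i++) {
    for (std::size_t n = 0; n < dst.size(); n++) {
      for (std::size_t m = 0; m < src.size(); m++) {
        dst[n][i] += a * mat[n][m] * src[m][i];
      }
    }
  }
}
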
template<typename scalar, typename time>
time VectorEncapsulation<scalar, time>::norm0() const
{
  return std::abs(*std::max_element(this->cbegin(), this->cend(),
                                    [](scalar a, scalar b) { return std::abs(a) < std::abs(b); }));
}

template<typename scalar, typename time>
VectorFactory<scalar, time>::VectorFactory(const size_t size)
  : size(size)
{}

template<typename scalar, typename time>
size_t VectorFactory<scalar, time>::dofs() const
{
  return this->size;
}

template<typename scalar, typename time>
shared_ptr<Encapsulation<time>> VectorFactory<scalar, time>::create(const EncapType)
{
  return make_shared<VectorEncapsulation<scalar, time>>(this->dofs());
}

template<typename scalar, typename time>
VectorEncapsulation<scalar, time>& as_vector(shared_ptr<Encapsulation<time>> x)
{
  typedef VectorEncapsulation<scalar, time> VectorT;
  shared_ptr<VectorT> y = dynamic_pointer_cast<VectorT>(x);
  assert(y);
  return *y.get();
}

template<typename scalar, typename time>
const VectorEncapsulation<scalar, time>& as_vector(shared_ptr<const Encapsulation<time>> x)
{
  typedef VectorEncapsulation<scalar, time> VectorT;
  shared_ptr<const VectorT> y = dynamic_pointer_cast<const VectorT>(x);
  assert(y);
  return *y.get();
}

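A short usage sketch of VectorFactory together with the as_vector helper. The header path and the EncapType enumerator passed to create() are assumptions made for the example.

#include <memory>
#include <pfasst/encap/vector.hpp>   // assumed header path

void factory_example()
{
  using namespace pfasst::encap;

  VectorFactory<double, double> factory(16);   // encapsulations with 16 degrees of freedom
  std::shared_ptr<Encapsulation<double>> u = factory.create(EncapType::solution);  // enumerator name assumed

  // as_vector downcasts the generic encapsulation to the concrete vector type
  VectorEncapsulation<double, double>& u_vec = as_vector<double, double>(u);
  u_vec[0] = 1.0;
}
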
template<typename scalar, typename time>
void VectorEncapsulation<scalar, time>::post(ICommunicator* comm, int tag)
{
  auto& mpi = as_mpi(comm);
  if (mpi.size() == 1) { return; }
  if (mpi.rank() == 0) { return; }

  if (this->recv_request != MPI_REQUEST_NULL) {
    throw MPIError("a previous receive request is still open");
  }

  int src = (mpi.rank() - 1) % mpi.size();
  ML_CLOG(DEBUG, "Encap", "non-blocking receiving from rank " << src << " with tag=" << tag);
  int err = MPI_Irecv(this->data(), sizeof(scalar) * this->size(), MPI_CHAR,
                      src, tag, mpi.comm, &this->recv_request);
  check_mpi_error(err);
  ML_CLOG(DEBUG, "Encap", "non-blocking received from rank " << src << " with tag=" << tag);
}

template<typename scalar, typename time>
void VectorEncapsulation<scalar, time>::recv(ICommunicator* comm, int tag, bool blocking)
{
  auto& mpi = as_mpi(comm);
  if (mpi.size() == 1) { return; }
  if (mpi.rank() == 0) { return; }

  MPI_Status stat = MPI_Status_factory();
  int err = MPI_SUCCESS;

  if (blocking) {
    int src = (mpi.rank() - 1) % mpi.size();
    ML_CLOG(DEBUG, "Encap", "blocking receive from rank " << src << " with tag=" << tag);
    err = MPI_Recv(this->data(), sizeof(scalar) * this->size(), MPI_CHAR,
                   src, tag, mpi.comm, &stat);
    check_mpi_error(err);
    ML_CLOG(DEBUG, "Encap", "received blocking from rank " << src << " with tag=" << tag << ": " << stat);
  } else {
    if (this->recv_request != MPI_REQUEST_NULL) {
      ML_CLOG(DEBUG, "Encap", "waiting on last receive request");
      err = MPI_Wait(&(this->recv_request), &stat);
      check_mpi_error(err);
      ML_CLOG(DEBUG, "Encap", "waited on last receive request: " << stat);
    }
  }
}

template<typename scalar, typename time>
void VectorEncapsulation<scalar, time>::send(ICommunicator* comm, int tag, bool blocking)
{
  auto& mpi = as_mpi(comm);
  if (mpi.size() == 1) { return; }
  if (mpi.rank() == mpi.size() - 1) { return; }

  MPI_Status stat = MPI_Status_factory();
  int err = MPI_SUCCESS;
  int dest = (mpi.rank() + 1) % mpi.size();

  if (blocking) {
    ML_CLOG(DEBUG, "Encap", "blocking send to rank " << dest << " with tag=" << tag);
    err = MPI_Send(this->data(), sizeof(scalar) * this->size(), MPI_CHAR, dest, tag, mpi.comm);
    check_mpi_error(err);
    ML_CLOG(DEBUG, "Encap", "sent blocking to rank " << dest << " with tag=" << tag);
  } else {
    // complete any previous non-blocking send before reusing the request and the buffer
    ML_CLOG(DEBUG, "Encap", "waiting on last send request to finish");
    err = MPI_Wait(&(this->send_request), &stat);
    check_mpi_error(err);
    ML_CLOG(DEBUG, "Encap", "waited on last send request: " << stat);

    ML_CLOG(DEBUG, "Encap", "non-blocking sending to rank " << dest << " with tag=" << tag);
    err = MPI_Isend(this->data(), sizeof(scalar) * this->size(), MPI_CHAR,
                    dest, tag, mpi.comm, &(this->send_request));
    check_mpi_error(err);
    ML_CLOG(DEBUG, "Encap", "sent non-blocking to rank " << dest << " with tag=" << tag);
  }
}

template<typename scalar, typename time>
void VectorEncapsulation<scalar, time>::broadcast(ICommunicator* comm)
{
  auto& mpi = as_mpi(comm);
  ML_CLOG(DEBUG, "Encap", "broadcasting");
  int err = MPI_Bcast(this->data(), sizeof(scalar) * this->size(), MPI_CHAR,
                      comm->size() - 1, mpi.comm);
  check_mpi_error(err);
  ML_CLOG(DEBUG, "Encap", "broadcasted");
}

Referenced declarations (brief descriptions from the API documentation):

static void zero(vector< precision > &data)
virtual shared_ptr< Encapsulation< time > > create(const EncapType) override: actual method to create an Encapsulation object of a specific type.
virtual void broadcast(ICommunicator *comm): broadcasts this data structure to all processes in comm.
virtual void saxpy(time a, shared_ptr< const Encapsulation< time >> x) override: provides the basic mathematical operation \( y += ax \).
#define ML_CLOG(level, logger_id, x): same as CLOG(level, logger, x) from easylogging++.
virtual void recv(ICommunicator *comm, int tag, bool blocking): receives values to store in this data structure.
virtual ~VectorEncapsulation()
virtual void zero() override: zeroes out all values of this data structure.
virtual void copy(shared_ptr< const Encapsulation< time >> x) override: copies values from another encapsulation into this data structure.
virtual time norm0() const override: maximum norm of the contained elements.
virtual void send(ICommunicator *comm, int tag, bool blocking): sends the values stored in this data structure.
Encapsulation< time >: data/solution encapsulation.
VectorEncapsulation(const size_t size)
ICommunicator: abstract interface for communicators.
virtual void mat_apply(vector< shared_ptr< Encapsulation< time >>> dst, time a, Matrix< time > mat, vector< shared_ptr< Encapsulation< time >>> src, bool zero=true) override
Eigen::Matrix< scalar, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor > Matrix
static void check_mpi_error(const int err_code): checks an MPI error code.
virtual void post(ICommunicator *comm, int tag)
VectorFactory(const size_t size)
static MPI_Status MPI_Status_factory(): creates and initializes a new empty MPI_Status object.
VectorEncapsulation< scalar, time > & as_vector(shared_ptr< Encapsulation< time >> x)