KaMPIng 0.2.0
Flexible and (near) zero-overhead C++ bindings for MPI
communicator.hpp
1// This file is part of KaMPIng.
2//
3// Copyright 2021-2022 The KaMPIng Authors
4//
5// KaMPIng is free software : you can redistribute it and/or modify it under the terms of the GNU Lesser General Public
6// License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later
7// version. KaMPIng is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
8// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
9// for more details.
10//
11// You should have received a copy of the GNU Lesser General Public License along with KaMPIng. If not, see
12// <https://www.gnu.org/licenses/>.
13
14#pragma once
15
16#include <algorithm>
17#include <cstddef>
18#include <cstdlib>
19
20#include <mpi.h>
21
22#include "error_handling.hpp"
25#include "kamping/group.hpp"
26#include "kamping/kassert/kassert.hpp"
29#include "kamping/mpi_ops.hpp"
32
33namespace kamping {
34
35// Needed by the plugin system to check if a plugin provides a callback function for MPI errors.
36KAMPING_MAKE_HAS_MEMBER(mpi_error_handler)
37
38/// @brief Wrapper for an MPI communicator providing access to \c rank() and \c size() of the communicator. The \ref
39/// Communicator is also the access point to all MPI communication functions provided by KaMPIng.
40/// @tparam DefaultContainerType The default container type to use for containers created by KaMPIng. Defaults to
41/// std::vector.
42/// @tparam Plugins Plugins adding functionality to KaMPIng. Plugins should be classes taking a <tt>Communicator</tt>
43/// template parameter and can assume that they are castable to <tt>Communicator</tt> from which they can
44/// call any function of <tt>kamping::Communicator</tt>. See <tt>test/plugin_tests.cpp</tt> for examples.
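/// A minimal plugin sketch (illustrative only; the plugin name and member are made up for this example, and the
/// CRTP-style cast follows the description above):
/// @code
/// template <typename Comm, template <typename...> typename DefaultContainerType>
/// class BarrierLoggerPlugin {
/// public:
///     void synchronized_step() const {
///         auto const& comm = static_cast<Comm const&>(*this); // plugins may cast themselves to the Communicator
///         comm.barrier();
///     }
/// };
/// using MyCommunicator = kamping::Communicator<std::vector, BarrierLoggerPlugin>;
/// @endcode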
45template <
46 template <typename...> typename DefaultContainerType = std::vector,
47 template <typename, template <typename...> typename>
48 typename... Plugins>
49class Communicator : public Plugins<Communicator<DefaultContainerType, Plugins...>, DefaultContainerType>... {
50public:
51 /// @brief Type of the default container type to use for containers created inside operations of this communicator.
52 /// @tparam Args Arguments to the container type.
53 template <typename... Args>
54    using default_container_type = DefaultContainerType<Args...>;
55
56 /// @brief Default constructor not specifying any MPI communicator and using \c MPI_COMM_WORLD by default.
57    Communicator() : Communicator(MPI_COMM_WORLD) {}
58
59 /// @brief Constructor where an MPI communicator has to be specified.
60 /// @param comm MPI communicator that is wrapped by this \c Communicator.
61 /// @param take_ownership Whether the Communicator should take ownership of comm, i.e. free it in the destructor.
62    explicit Communicator(MPI_Comm comm, bool take_ownership = false) : Communicator(comm, 0, take_ownership) {}
63
64 /// @brief Constructor where an MPI communicator and the default root have to be specified.
65 /// @param comm MPI communicator that is wrapped by this \c Communicator.
66 /// @param root Default root that is used by MPI operations requiring a root.
67 /// @param take_ownership Whether the Communicator should take ownership of comm, i.e. free it in the destructor.
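    /// A minimal usage sketch (illustrative; assumes MPI has already been initialized, e.g. via \c MPI_Init):
    /// @code
    /// MPI_Comm dup;
    /// MPI_Comm_dup(MPI_COMM_WORLD, &dup);
    /// kamping::Communicator<> comm(dup, /*root=*/0, /*take_ownership=*/true);
    /// // dup is freed by comm's destructor because ownership was transferred.
    /// @endcode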
68 explicit Communicator(MPI_Comm comm, int root, bool take_ownership = false)
69 : _rank(get_mpi_rank(comm)),
70 _size(get_mpi_size(comm)),
71 _comm(comm),
72          _default_tag(0),
73          _owns_mpi_comm(take_ownership) {
74 if (take_ownership) {
75 KAMPING_ASSERT(comm != MPI_COMM_WORLD, "Taking ownership of MPI_COMM_WORLD is not allowed.");
76 }
77 this->root(root);
78 }
79
80 /// @brief Copy constructor that duplicates the MPI_Comm and takes ownership of the newly created one in the copy.
81 /// @param other The Communicator to copy.
90
91 /// @brief Move constructor
92 /// @param other The Communicator to move.
93    Communicator(Communicator&& other)
94        : _rank(other._rank),
95          _size(other._size),
96          _comm(other._comm),
97          _default_tag(other._default_tag),
98          _root(other._root),
99          _owns_mpi_comm(other._owns_mpi_comm) {
100 // This prevents freeing the communicator twice (once in other and once in this)
101 other._comm = MPI_COMM_NULL;
102 other._owns_mpi_comm = false;
103 }
104
105 /// @brief Destructor that frees the contained \c MPI_Comm if it is owned by the Communicator.
106 virtual ~Communicator() {
107 if (_owns_mpi_comm) {
108            MPI_Comm_free(&_comm);
109        }
110 }
111
112 /// @brief Move assignment operator.
113 /// @param other The Communicator to move.
114    Communicator& operator=(Communicator&& other) {
115        swap(other);
116 return *this;
117 }
118
119 /// @brief Copy assignment operator. Behaves according to the copy constructor.
120 /// @param other The Communicator to copy.
121    Communicator& operator=(Communicator const& other) {
122        Communicator tmp(other);
123        swap(tmp);
124 return *this;
125 }
126
127 /// @brief Swaps the Communicator with another Communicator.
128 /// @param other The Communicator to swap with.
129    void swap(Communicator& other) {
130        std::swap(_rank, other._rank);
131 std::swap(_size, other._size);
132 std::swap(_comm, other._comm);
133 std::swap(_default_tag, other._default_tag);
134 std::swap(_root, other._root);
135 std::swap(_owns_mpi_comm, other._owns_mpi_comm);
136 }
137
138 /// @brief Terminates MPI execution environment (on all processes in this Communicator).
139    /// Beware of MPI implementations that might terminate all processes, whether they are in this communicator or not.
140 ///
141 /// @param errorcode Error code to return to invoking environment.
142 void abort(int errorcode = 1) const {
143        int err = MPI_Abort(_comm, errorcode);
144        this->mpi_error_hook(err, "MPI_Abort");
145 }
146
147 /// @brief Rank of the current MPI process in the communicator as <tt>int</tt>.
148 /// @return Rank of the current MPI process in the communicator as <tt>int</tt>.
149 [[nodiscard]] int rank_signed() const {
150        return asserting_cast<int>(rank());
151    }
152
153 /// @brief Rank of the current MPI process in the communicator as <tt>size_t</tt>.
154 /// @return Rank of the current MPI process in the communicator as <tt>size_t</tt>.
155 [[nodiscard]] size_t rank() const {
156 return _rank;
157 }
158
159 /// @brief Number of MPI processes in this communicator as <tt>int</tt>.
160    /// @return Number of MPI processes in this communicator as <tt>int</tt>.
161 [[nodiscard]] int size_signed() const {
162        return asserting_cast<int>(size());
163    }
164
165 /// @brief Number of MPI processes in this communicator as <tt>size_t</tt>.
166    ///
167 /// @return Number of MPI processes in this communicator as <tt>size_t</tt>.
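    /// A minimal usage sketch (illustrative; assumes an initialized MPI environment):
    /// @code
    /// kamping::Communicator<> comm; // wraps MPI_COMM_WORLD by default
    /// if (comm.rank() + 1 == comm.size()) {
    ///     // executed only on the last rank
    /// }
    /// @endcode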
168 [[nodiscard]] size_t size() const {
169 return _size;
170 }
171
172 /// @brief Number of NUMA nodes (different shared memory regions) in this communicator.
173 /// This operation is expensive (communicator splitting and communication). You should cache the result if you need
174 /// it multiple times.
175    /// @return Number of NUMA nodes (different shared memory regions) in this communicator.
176 [[nodiscard]] size_t num_numa_nodes() const;
177
178 /// @brief Get this 'processor's' name using \c MPI_Get_processor_name.
179    /// @return This 'processor's' name. Nowadays, this is typically the hostname.
180 std::string processor_name() const {
181 // Get the name of this node.
182 int my_len;
183        char my_name[MPI_MAX_PROCESSOR_NAME];
184
185        int ret = MPI_Get_processor_name(my_name, &my_len);
186 this->mpi_error_hook(ret, "MPI_Get_processor_name");
187 return std::string(my_name, asserting_cast<size_t>(my_len));
188 }
189
190 /// @brief MPI communicator corresponding to this communicator.
191 /// @return MPI communicator corresponding to this communicator.
192    [[nodiscard]] MPI_Comm mpi_communicator() const {
193        return _comm;
194 }
195
196 /// @brief Disowns the wrapped MPI_Comm, i.e. it will not be freed in the destructor.
197 /// @return MPI communicator corresponding to this communicator.
198    MPI_Comm disown_mpi_communicator() {
199        _owns_mpi_comm = false;
200        return _comm;
201    }
202
203 /// @brief Set a new default tag used in point to point communication. The initial value is 0.
204 void default_tag(int const default_tag) {
205        THROWING_KAMPING_ASSERT(
206            0 <= default_tag && default_tag <= Environment<>::tag_upper_bound(),
207            "invalid tag " << default_tag << ", must be in range [0, " << Environment<>::tag_upper_bound() << "]"
208        );
209        _default_tag = default_tag;
210    }
211
212 /// @brief Default tag used in point to point communication. The initial value is 0.
213 [[nodiscard]] int default_tag() const {
214 return _default_tag;
215 }
216
217 /// @brief Set a new root for MPI operations that require a root.
218 /// @param new_root The new default root.
219 void root(int const new_root) {
220        THROWING_KAMPING_ASSERT(
221            is_valid_rank(new_root),
222            "invalid root rank " << new_root << " in communicator of size " << size()
223        );
224        _root = asserting_cast<size_t>(new_root);
225    }
226
227 /// @brief Set a new root for MPI operations that require a root.
228 /// @param new_root The new default root.
229 void root(size_t const new_root) {
230        THROWING_KAMPING_ASSERT(
231            is_valid_rank(new_root),
232            "invalid root rank " << new_root << " in communicator of size " << size()
233 );
234 _root = new_root;
235 }
236
237 /// @brief Default root for MPI operations that require a root as <tt>size_t</tt>.
238 /// @return Default root for MPI operations that require a root as <tt>size_t</tt>.
239 [[nodiscard]] size_t root() const {
240 return _root;
241 }
242
243 /// @brief Default root for MPI operations that require a root as <tt>int</tt>.
244 /// @return Default root for MPI operations that require a root as <tt>int</tt>.
245 [[nodiscard]] int root_signed() const {
246        return asserting_cast<int>(root());
247    }
248
249 /// @brief Check if this rank is the root rank.
250 /// @return Return \c true if this rank is the root rank.
251 /// @param root The custom root's rank.
252 [[nodiscard]] bool is_root(int const root) const {
253 return rank() == asserting_cast<size_t>(root);
254 }
255
256 /// @brief Check if this rank is the root rank.
257 /// @return Return \c true if this rank is the root rank.
258 /// @param root The custom root's rank.
259 [[nodiscard]] bool is_root(size_t const root) const {
260 return rank() == root;
261 }
262
263 /// @brief Check if this rank is the root rank.
264 /// @return Return \c true if this rank is the root rank.
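    /// A usage sketch combining the default root with \ref is_root() (illustrative; assumes an initialized MPI
    /// environment):
    /// @code
    /// kamping::Communicator<> comm;
    /// comm.root(comm.size() - 1); // make the last rank the default root
    /// if (comm.is_root()) {
    ///     // executed only on the last rank
    /// }
    /// @endcode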
265 [[nodiscard]] bool is_root() const {
266 return is_root(root());
267 }
268
269    /// @brief Split the communicator into subcommunicators, one per color.
270 /// @param color All ranks that have the same color will be in the same new communicator.
271 /// @param key By default, ranks in the new communicator are determined by the underlying MPI library (if \c key is
272 /// 0). Otherwise, ranks are ordered the same way the keys are ordered.
273 /// @return \ref Communicator wrapping the newly split MPI communicator.
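    /// A usage sketch (illustrative) that splits the communicator into a lower and an upper half:
    /// @code
    /// kamping::Communicator<> comm;
    /// int const color = comm.rank() < comm.size() / 2 ? 0 : 1;
    /// auto half = comm.split(color);
    /// // half contains only the ranks with the same color; ranks are renumbered starting from 0.
    /// @endcode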
274 [[nodiscard]] Communicator split(int const color, int const key = 0) const {
275        MPI_Comm new_comm;
276        MPI_Comm_split(_comm, color, key, &new_comm);
277        return Communicator(new_comm, true);
278 }
279
280 /// @brief Split the communicator by the specified type (e.g., shared memory)
281 ///
282 /// @param type The only standard-conform value is \c MPI_COMM_TYPE_SHARED but your MPI implementation might support
283 /// other types. For example: \c OMPI_COMM_TYPE_L3CACHE.
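    /// A usage sketch (illustrative) that creates one subcommunicator per shared-memory node:
    /// @code
    /// kamping::Communicator<> comm;
    /// auto node_local_comm = comm.split_by_type(MPI_COMM_TYPE_SHARED);
    /// @endcode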
284 [[nodiscard]] Communicator split_by_type(int const type) const {
285 // MPI_COMM_TYPE_HW_GUIDED is only available starting with MPI-4.0
286 // MPI_Info info;
287 // MPI_Info_create(&info);
288 // MPI_Info_set(info, "mpi_hw_resource_type", "NUMANode");
289 // auto ret = MPI_Comm_split_type(_comm, MPI_COMM_TYPE_HW_GUIDED, rank_signed(), info, &newcomm);
290
291        MPI_Comm new_comm;
292        int ret = MPI_Comm_split_type(_comm, type, rank_signed(), MPI_INFO_NULL, &new_comm);
293 this->mpi_error_hook(ret, "MPI_Comm_split_type");
294 return Communicator(new_comm, true);
295 }
296
297 /// @brief Split the communicator into NUMA nodes.
298 /// @return \ref Communicator wrapping the newly split MPI communicator. Each rank will be in the communicator
299 /// corresponding to its NUMA node.
300    [[nodiscard]] Communicator split_to_shared_memory() const {
301        return split_by_type(MPI_COMM_TYPE_SHARED);
302    }
303
304 /// @brief Return the group associated with this communicator.
305 /// @return The group associated with this communicator.
306 [[nodiscard]] Group group() const {
307 return Group(*this);
308 }
309
310 /// @brief Create subcommunicators.
311 ///
312 /// This method requires globally available information on the ranks in the subcommunicators.
313 /// A rank \c r must know all other ranks which will be part of the subcommunicator to which \c r will belong.
314 /// This information can be used by the MPI implementation to execute a communicator split more efficiently.
315 /// The method must be called by all ranks in the communicator.
316 ///
317 /// @tparam Ranks Contiguous container storing integers.
318 /// @param ranks_in_own_group Contains the ranks that will be part of this rank's new (sub-)communicator.
319 /// All ranks specified in \c ranks_in_own_group must have an identical \c ranks_in_own_group argument. Furthermore,
320 /// this set must not be empty.
321 /// @return \ref Communicator wrapping the newly split MPI communicator.
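    /// A usage sketch (illustrative) in which every rank builds the membership list of its half of the communicator:
    /// @code
    /// kamping::Communicator<> comm;
    /// std::vector<int>         my_group;
    /// bool const in_lower_half = comm.rank() < comm.size() / 2;
    /// for (int r = 0; r < comm.size_signed(); ++r) {
    ///     if ((static_cast<size_t>(r) < comm.size() / 2) == in_lower_half) {
    ///         my_group.push_back(r);
    ///     }
    /// }
    /// auto sub_comm = comm.create_subcommunicators(my_group);
    /// @endcode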
322 template <typename Ranks>
323    [[nodiscard]] Communicator create_subcommunicators(Ranks const& ranks_in_own_group) const {
324        static_assert(std::is_same_v<typename Ranks::value_type, int>, "Ranks must be of type int");
325        THROWING_KAMPING_ASSERT(
326            ranks_in_own_group.size() > 0ull,
327 "The set of ranks to include in the new subcommunicator must not be empty."
328 );
329 auto ranks_contain_own_rank = [&]() {
330 return std::find(ranks_in_own_group.begin(), ranks_in_own_group.end(), rank()) != ranks_in_own_group.end();
331 };
332        THROWING_KAMPING_ASSERT(
333            ranks_contain_own_rank(),
334            "The ranks to include in the new subcommunicator must contain own rank."
335 );
342 ranks_in_own_group.data(),
344 );
347 return Communicator(new_comm, true);
348 }
349
350 /// @brief Create (sub-)communicators using a sparse representation for the ranks contained in the
351 /// subcommunicators.
352 ///
353 /// This split method requires globally available information on the ranks in the split communicators.
354 /// A rank \c r must know all other ranks which will be part of the subcommunicator to which \c r will belong.
355 /// This information can be used by the MPI implementation to execute a communicator split more efficiently.
356 /// The method must be called by all ranks in the communicator.
357 ///
358 /// @param rank_ranges Contains the ranks that will be part of this rank's new (sub-)communicator in a sparse
359 /// representation via rank ranges each consisting of (first rank, last rank and stride).
360    /// All ranks specified in \c rank_ranges must have
361    /// an identical \c rank_ranges argument. Furthermore, this set must not be empty.
362 /// @return \ref Communicator wrapping the newly split MPI communicator.
363    [[nodiscard]] Communicator create_subcommunicators(RankRanges const& rank_ranges) const {
364        THROWING_KAMPING_ASSERT(
365            rank_ranges.size() > 0ull,
366 "The set of ranks to include in the new subcommunicator must not be empty."
367 );
368        THROWING_KAMPING_ASSERT(
369            rank_ranges.contains(rank_signed()),
370 "The ranks to include in the new subcommunicator must contain own rank."
371 );
378 return Communicator(new_comm, true);
379 }
380
381 ///@brief Compare this communicator with another given communicator. Uses \c MPI_Comm_compare internally.
382 ///
383 ///@param other_comm Communicator with which this communicator is compared.
384 ///@return Return whether compared communicators are identical, congruent, similar or unequal.
385    CommunicatorComparisonResult compare(Communicator const& other_comm) const {
386        int result;
387 MPI_Comm_compare(_comm, other_comm.mpi_communicator(), &result);
388 return static_cast<CommunicatorComparisonResult>(result);
389 }
390
391 /// @brief Convert a rank from this communicator to the rank in another communicator.
392 /// @param rank The rank in this communicator
393 /// @param other_comm The communicator to convert the rank to
394 /// @return The rank in other_comm
404
405 /// @brief Convert a rank from another communicator to the rank in this communicator.
406 /// @param rank The rank in other_comm
407 /// @param other_comm The communicator to convert the rank from
408 /// @return The rank in this communicator
409 [[nodiscard]] int convert_rank_from_communicator(int const rank, Communicator const& other_comm) const {
410 return other_comm.convert_rank_to_communicator(rank, *this);
411 }
412
413 /// @brief Computes a rank that is \c distance ranks away from this MPI thread's current rank and checks if this is
414    /// a valid rank in this communicator.
415 ///
416 /// The resulting rank is valid, iff it is at least zero and less than this communicator's size. The \c distance can
417 /// be negative. Unlike \ref rank_shifted_cyclic(), this does not guarantee a valid rank but can indicate if the
418 /// resulting rank is not valid.
419 /// @param distance Amount current rank is decreased or increased by.
420    /// @return The shifted rank if it lies in [0, size of communicator); otherwise an assertion fails or an exception is thrown.
421 [[nodiscard]] size_t rank_shifted_checked(int const distance) const {
422 int const result = rank_signed() + distance;
423 THROWING_KAMPING_ASSERT(is_valid_rank(result), "invalid shifted rank " << result);
424        return asserting_cast<size_t>(result);
425    }
426
427 /// @brief Computes a rank that is some ranks apart from this MPI thread's rank modulo the communicator's size.
428 ///
429 /// When we need to compute a rank that is greater (or smaller) than this communicator's rank, we can use this
430    /// function. It computes the rank that is \c distance ranks apart. However, this function always returns a valid
431 /// rank, as it computes the rank in a circular fashion, i.e., \f$ new\_rank=(rank + distance) \% size \f$.
432 /// @param distance Distance of the new rank to the rank of this MPI thread.
433    /// @return The circular rank that is \c distance ranks apart from this MPI thread's rank.
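    /// A usage sketch (illustrative) computing the cyclic neighbors in a ring:
    /// @code
    /// kamping::Communicator<> comm;
    /// size_t const right_neighbor = comm.rank_shifted_cyclic(1);
    /// size_t const left_neighbor  = comm.rank_shifted_cyclic(-1);
    /// @endcode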
434 [[nodiscard]] size_t rank_shifted_cyclic(int const distance) const {
435 int const capped_distance = distance % size_signed();
436        return asserting_cast<size_t>((rank_signed() + capped_distance + size_signed()) % size_signed());
437    }
438
439 /// @brief Checks if a rank is a valid rank for this communicator, i.e., if the rank is in [0, size).
440 /// @return \c true if rank in [0,size) and \c false otherwise.
441 [[nodiscard]] bool is_valid_rank(int const rank) const {
442 return rank >= 0 && rank < size_signed();
443 }
444
445 /// @brief Checks if a rank is a valid rank for this communicator, i.e., if the rank is in [0, size).
446 /// @return \c true if rank in [0,size) and \c false otherwise.
447 [[nodiscard]] bool is_valid_rank(size_t const rank) const {
448 return rank < size();
449 }
450
451    /// @brief If <tt>error_code != MPI_SUCCESS</tt>, searches the plugins for a \a public <tt>mpi_error_handler(const
452 /// int error_code, std::string& callee)</tt> member. Searches the plugins front to back and calls the \a first
453 /// handler found. If no handler is found, calls the default error hook. If error code is \c MPI_SUCCESS, does
454 /// nothing.
455 void mpi_error_hook(int const error_code, std::string const& callee) const {
456 if (error_code != MPI_SUCCESS) {
457 mpi_error_hook_impl<Plugins...>(error_code, callee);
458 }
459 }
460
461 /// @brief Default MPI error callback. Depending on <tt>KASSERT_EXCEPTION_MODE</tt> either throws a \ref
462 /// MpiErrorException if \c error_code != \c MPI_SUCCESS or fails an assertion.
463 void mpi_error_default_handler(int const error_code, std::string const& function_name) const {
464        THROWING_KASSERT_SPECIFIED(
465            error_code == MPI_SUCCESS,
466            function_name << " failed!",
467            MpiErrorException,
468            error_code
469 );
470 }
471
472 template <typename... Args>
473 void send(Args... args) const;
474
475 template <typename... Args>
476 void bsend(Args... args) const;
477
478 template <typename... Args>
479 void ssend(Args... args) const;
480
481 template <typename... Args>
482 void rsend(Args... args) const;
483
484 template <typename... Args>
485 auto isend(Args... args) const;
486
487 template <typename... Args>
488 auto ibsend(Args... args) const;
489
490 template <typename... Args>
491 auto issend(Args... args) const;
492
493 template <typename... Args>
494 auto irsend(Args... args) const;
495
496 template <typename... Args>
497 auto probe(Args... args) const;
498
499 template <typename... Args>
500 auto iprobe(Args... args) const;
501
502 template <typename recv_value_type_tparam = kamping::internal::unused_tparam, typename... Args>
503 auto sendrecv(Args... args) const;
504
505 template <typename recv_value_type_tparam = kamping::internal::unused_tparam, typename... Args>
506 auto recv(Args... args) const;
507
508 template <typename recv_value_type_tparam, typename... Args>
509 auto recv_single(Args... args) const;
510
511 template <typename recv_value_type_tparam = kamping::internal::unused_tparam, typename... Args>
512 auto try_recv(Args... args) const;
513
514 template <typename recv_value_type_tparam = kamping::internal::unused_tparam, typename... Args>
515 auto irecv(Args... args) const;
516
517 template <typename... Args>
518 auto alltoall(Args... args) const;
519
520 template <typename... Args>
521 auto alltoall_inplace(Args... args) const;
522
523 template <typename... Args>
524 auto alltoallv(Args... args) const;
525
526 template <typename recv_value_type_tparam = kamping::internal::unused_tparam, typename... Args>
527 auto scatter(Args... args) const;
528
529 template <typename recv_value_type_tparam = kamping::internal::unused_tparam, typename... Args>
530 auto scatter_single(Args... args) const;
531
532 template <typename recv_value_type_tparam = kamping::internal::unused_tparam, typename... Args>
533 auto scatterv(Args... args) const;
534
535 template <typename... Args>
536 auto reduce(Args... args) const;
537
538 template <typename... Args>
539 auto reduce_single(Args... args) const;
540
541 template <typename... Args>
542 auto scan(Args... args) const;
543
544 template <typename... Args>
545 auto scan_inplace(Args... args) const;
546
547 template <typename... Args>
548 auto scan_single(Args... args) const;
549
550 template <typename... Args>
551 auto exscan(Args... args) const;
552
553 template <typename... Args>
554 auto exscan_inplace(Args... args) const;
555
556 template <typename... Args>
557 auto exscan_single(Args... args) const;
558
559 template <typename... Args>
560 auto allreduce(Args... args) const;
561
562 template <typename... Args>
563 auto allreduce_inplace(Args... args) const;
564
565 template <typename... Args>
566 auto allreduce_single(Args... args) const;
567
568 template <typename... Args>
569 auto iallreduce(Args... args) const;
570
571 template <typename... Args>
572 auto gather(Args... args) const;
573
574 template <typename... Args>
575 auto gatherv(Args... args) const;
576
577 template <typename... Args>
578 auto allgather(Args... args) const;
579
580 template <typename... Args>
581 auto allgather_inplace(Args... args) const;
582
583 template <typename... Args>
584 auto allgatherv(Args... args) const;
585
586 template <typename recv_value_type_tparam = kamping::internal::unused_tparam, typename... Args>
587 auto bcast(Args... args) const;
588
589 template <typename recv_value_type_tparam = kamping::internal::unused_tparam, typename... Args>
590 auto bcast_single(Args... args) const;
591
592 template <typename... Args>
593 void barrier(Args... args) const;
594
595 template <typename... Args>
596 auto ibarrier(Args... args) const;
597
598 template <typename Value>
599 bool is_same_on_all_ranks(Value const& value) const;
600
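    // Illustrative usage sketch for the communication wrappers declared above. It assumes the named-parameter
    // helpers kamping::send_buf, kamping::op and kamping::ops::plus from KaMPIng's named-parameter headers,
    // which are not part of this file; the wrappers themselves are defined in the per-operation headers
    // (e.g. allreduce.hpp).
    //
    //     kamping::Communicator<> comm;
    //     std::vector<int>        input{comm.rank_signed()};
    //     auto sums = comm.allreduce(kamping::send_buf(input), kamping::op(kamping::ops::plus<>()));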
601private:
602    /// @brief Compute the rank of the current MPI process using \c MPI_Comm_rank.
603 /// @return Rank of the current MPI process in the communicator.
604 size_t get_mpi_rank(MPI_Comm comm) const {
605        THROWING_KAMPING_ASSERT(
606            comm != MPI_COMM_NULL,
607            "communicator must be initialized with a valid MPI communicator"
608 );
609
610 int rank;
611 MPI_Comm_rank(comm, &rank);
612 return asserting_cast<size_t>(rank);
613 }
614
615 /// @brief Compute the number of MPI processes in this communicator using \c MPI_Comm_size.
616 /// @return Size of the communicator.
617 size_t get_mpi_size(MPI_Comm comm) const {
618 THROWING_KAMPING_ASSERT(
619 comm != MPI_COMM_NULL,
620 "communicator must be initialized with a valid MPI communicator"
621 );
622
623 int size;
624 MPI_Comm_size(comm, &size);
625 return asserting_cast<size_t>(size);
626 }
627
628 /// See \ref mpi_error_hook
629 template <
630 template <typename, template <typename...> typename>
631 typename Plugin,
632 template <typename, template <typename...> typename>
633 typename... RemainingPlugins>
634 void mpi_error_hook_impl(int const error_code, std::string const& callee) const {
635 using PluginType = Plugin<Communicator<DefaultContainerType, Plugins...>, DefaultContainerType>;
636 if constexpr (has_member_mpi_error_handler_v<PluginType, int, std::string const&>) {
637 static_cast<PluginType const&>(*this).mpi_error_handler(error_code, callee);
638 } else {
639 if constexpr (sizeof...(RemainingPlugins) == 0) {
640 mpi_error_hook_impl<void>(error_code, callee);
641 } else {
642 mpi_error_hook_impl<RemainingPlugins...>(error_code, callee);
643 }
644 }
645 }
646
647 template <typename = void>
648 void mpi_error_hook_impl(int const error_code, std::string const& callee) const {
649 mpi_error_default_handler(error_code, callee);
650 }
651
652protected:
653 size_t _rank; ///< Rank of the MPI process in this communicator.
654 size_t _size; ///< Number of MPI processes in this communicator.
655 MPI_Comm _comm; ///< Corresponding MPI communicator.
656
657 size_t _root; ///< Default root for MPI operations that require a root.
658 int _default_tag; ///< Default tag value used in point to point communication.
659
660    bool _owns_mpi_comm; ///< Whether the Communicator object owns the contained MPI_Comm, i.e. whether it is
661 ///< allowed to free it in the destructor.
662
663}; // class communicator
664
665/// @brief A basic KaMPIng Communicator that uses std::vector when creating new buffers.
666using BasicCommunicator = Communicator<>;
667
668/// @brief Gets a \c const reference to a \ref BasicCommunicator for \c MPI_COMM_WORLD.
669///
670/// Useful if you want access to KaMPIng's base functionality without keeping an instance of \ref Communicator or
671/// constructing a new one on the fly.
672///
673/// @return A \c const reference to a \ref BasicCommunicator for \c MPI_COMM_WORLD.
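/// A minimal usage sketch (illustrative; assumes an initialized MPI environment):
/// @code
/// if (kamping::comm_world().rank() == 0) {
///     // executed only on rank 0 of MPI_COMM_WORLD
/// }
/// @endcode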
674inline BasicCommunicator const& comm_world() {
675 // By using a static variable in a function here, this gets constructed on first use.
676 static BasicCommunicator const comm_world;
677 return comm_world;
678}
679
680/// @brief Gets the rank in \c MPI_COMM_WORLD as size_t.
681///
682/// @return The rank in \c MPI_COMM_WORLD.
683inline size_t world_rank() {
684 return comm_world().rank();
685}
686
687/// @brief Gets the rank in \c MPI_COMM_WORLD as int.
688///
689/// @return The rank in \c MPI_COMM_WORLD.
690inline int world_rank_signed() {
691 return comm_world().rank_signed();
692}
693
694/// @brief Gets the size of \c MPI_COMM_WORLD as size_t.
695///
696/// @return The size of \c MPI_COMM_WORLD.
697inline size_t world_size() {
698 return comm_world().size();
699}
700
701/// @brief Gets the size of \c MPI_COMM_WORLD as int.
702///
703/// @return The size of \c MPI_COMM_WORLD.
704inline int world_size_signed() {
705 return comm_world().size_signed();
706}
707
708} // namespace kamping