/usr/include/dune/common/parallel/collectivecommunication.hh is in libdune-common-dev 2.5.1-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#ifndef DUNE_COLLECTIVECOMMUNICATION_HH
#define DUNE_COLLECTIVECOMMUNICATION_HH
/*!
\file
\brief Implements a utility class that provides
collective communication methods for sequential programs.
\ingroup ParallelCommunication
*/
#include <iostream>
#include <complex>
#include <algorithm>
#include <dune/common/binaryfunctions.hh>
#include <dune/common/exceptions.hh>
/*! \defgroup ParallelCommunication Parallel Communication
\ingroup Common
\brief Abstractions for parallel computing
Dune offers an abstraction to the basic methods of parallel
communication. It allows one to switch parallel features on and off,
without changing the code. This is done using either CollectiveCommunication
or MPICollectiveCommunication.
*/
/*!
\file
\brief An abstraction to the basic methods of parallel communication,
following the message-passing paradigm.
\ingroup ParallelCommunication
*/
namespace Dune
{
/* define some type that definitely differs from MPI_Comm */
struct No_Comm {};
/*! @brief Collective communication interface and sequential default implementation
CollectiveCommunication offers an abstraction to the basic methods
of parallel communication, following the message-passing
paradigm. It allows one to switch parallel features on and off, without
changing the code. Currently only MPI and sequential code are
supported.
A CollectiveCommunication object is returned by all grids (also
the sequential ones) in order to allow code to be written in
a transparent way for sequential and parallel grids.
This class provides a default implementation for sequential grids.
The number of processes involved is 1; any sum, maximum, etc. simply
returns its input argument, and so on.
In specializations one can implement the real thing using appropriate
communication functions, e.g. there exists an implementation using
the Message Passing %Interface (MPI), see Dune::CollectiveCommunication<MPI_Comm>.
Moreover, the communication subsystem used by an implementation
is not visible in the interface, i.e. Dune grid implementations
are not restricted to MPI.
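A minimal usage sketch with the sequential default implementation; the helper
computeLocalContribution() is purely hypothetical and only stands in for some
local computation:
\code
#include <iostream>
#include <dune/common/parallel/collectivecommunication.hh>

Dune::CollectiveCommunication<Dune::No_Comm> cc;  // sequential dummy communicator
double local  = computeLocalContribution();       // hypothetical local work
double global = cc.sum(local);                    // equals local here, since cc.size() == 1
if (cc.rank() == 0)
  std::cout << "global sum: " << global << std::endl;
\endcode
The same calls work unchanged with a parallel specialization such as
Dune::CollectiveCommunication<MPI_Comm>.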
\tparam Communicator The communicator type used by your message-passing implementation.
For MPI this will be MPI_Comm. For sequential codes there is the dummy communicator No_Comm.
It is assumed that if you want to specialize the CollectiveCommunication class for a
message-passing system other than MPI, that message-passing system will have something
equivalent to MPI communicators.
\ingroup ParallelCommunication
*/
template<typename Communicator>
class CollectiveCommunication
{
public:
//! Construct default object
CollectiveCommunication()
{}
/** \brief Constructor with a given communicator
*
* As this is the implementation for the sequential setting, the communicator is a dummy and simply discarded.
*/
CollectiveCommunication (const Communicator&)
{}
//! Return rank, is between 0 and size()-1
int rank () const
{
return 0;
}
//! Number of processes in set, is greater than 0
int size () const
{
return 1;
}
/** @brief Compute the sum of the argument over all processes and
return the result in every process. Assumes that T has an operator+
*/
template<typename T>
T sum (T& in) const // MPI does not know about const :-(
{
return in;
}
/** @brief Compute the sum over all processes for each component of an array and return the result
in every process. Assumes that T has an operator+
@returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename T>
int sum (T* inout, int len) const
{
return 0;
}
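/* Illustrative sketch (sequential default) for sum(): the scalar overload simply
   returns its argument and the array overload leaves the buffer untouched; an MPI
   specialization would instead reduce over all ranks.
   \code
   Dune::CollectiveCommunication<Dune::No_Comm> cc;
   int n = 5;
   int total = cc.sum(n);           // 5, since there is only one process
   double a[3] = {1.0, 2.0, 3.0};
   cc.sum(a, 3);                    // a is unchanged here; summed in place under MPI
   \endcode
 */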
/** @brief Compute the product of the argument over all processes and
return the result in every process. Assumes that T has an operator*
*/
template<typename T>
T prod (T& in) const // MPI does not know about const :-(
{
return in;
}
/** @brief Compute the product over all processes
for each component of an array and return the result
in every process. Assumes that T has an operator*
@returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename T>
int prod (T* inout, int len) const
{
return 0;
}
/** @brief Compute the minimum of the argument over all processes and
return the result in every process. Assumes that T has an operator<
*/
template<typename T>
T min (T& in) const // MPI does not know about const :-(
{
return in;
}
/** @brief Compute the minimum over all processes
for each component of an array and return the result
in every process. Assumes that T has an operator<
@returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename T>
int min (T* inout, int len) const
{
return 0;
}
/** @brief Compute the maximum of the argument over all processes and
return the result in every process. Assumes that T has an operator<
*/
template<typename T>
T max (T& in) const // MPI does not know about const :-(
{
return in;
}
/** @brief Compute the maximum over all processes
for each component of an array and return the result
in every process. Assumes that T has an operator<
@returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename T>
int max (T* inout, int len) const
{
return 0;
}
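/* prod(), min() and max() follow the same pattern as sum(); a brief sketch with
   the sequential default, where the result equals the input:
   \code
   Dune::CollectiveCommunication<Dune::No_Comm> cc;
   double h = 0.25;
   double hmin = cc.min(h);         // global minimum; equals h here
   double hmax = cc.max(h);         // global maximum; equals h here
   \endcode
 */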
/** @brief Wait until all processes have arrived at this point in the program.
@returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
int barrier () const
{
return 0;
}
/** @brief Distribute an array from the process with rank root to all other processes
@returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename T>
int broadcast (T* inout, int len, int root) const
{
return 0;
}
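/* A possible use of broadcast(): the root provides the data and every rank leaves
   the call with the same buffer contents; in this sequential default the buffer is
   simply returned unchanged.
   \code
   Dune::CollectiveCommunication<Dune::No_Comm> cc;
   int params[2] = {42, 7};         // only meaningful on the root in a parallel run
   cc.broadcast(params, 2, 0);      // afterwards every rank holds {42, 7}
   \endcode
 */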
/** @brief Gather arrays on root task.
*
* Each process sends its in array of length len to the root process
* (including the root itself). In the root process these arrays are stored in rank
* order in the out array which must have size len * number of processes.
* @param[in] in The send buffer with the data to send.
* @param[out] out The buffer to store the received data in. Might have length zero on non-root
* tasks.
* @param[in] len The number of elements to send on each task.
* @param[in] root The root task that gathers the data.
* @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename T>
int gather (T* in, T* out, int len, int root) const // note out must have same size as in
{
for (int i=0; i<len; i++)
out[i] = in[i];
return 0;
}
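/* Sketch for gather(): every rank contributes len elements and the root receives
   them concatenated in rank order, so its receive buffer needs len * size()
   entries (2 here, because size() == 1 in the sequential default).
   \code
   Dune::CollectiveCommunication<Dune::No_Comm> cc;
   int mine[2] = {cc.rank(), cc.rank() + 1};
   int all[2];                      // len * cc.size() entries on the root
   cc.gather(mine, all, 2, 0);      // on rank 0: all == {0, 1}
   \endcode
 */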
/** @brief Gather arrays of variable size on root task.
*
* Each process sends its in array of length sendlen to the root process
* (including the root itself). In the root process these arrays are stored in rank
* order in the out array.
* @param[in] in The send buffer with the data to be sent
* @param[in] sendlen The number of elements to send on each task
* @param[out] out The buffer to store the received data in. May have length zero on non-root
* tasks.
* @param[in] recvlen An array with size equal to the number of processes containing the number
* of elements to receive from process i at position i, i.e. the number that
* is passed as sendlen argument to this function in process i.
* May have length zero on non-root tasks.
* @param[out] displ An array with size equal to the number of processes. Data received from
* process i will be written starting at out+displ[i] on the root process.
* May have length zero on non-root tasks.
* @param[in] root The root task that gathers the data.
* @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename T>
int gatherv (T* in, int sendlen, T* out, int* recvlen, int* displ, int root) const
{
for (int i=*displ; i<sendlen; i++)
out[i] = in[i];
return 0;
}
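/* Sketch for gatherv(): each rank may contribute a different number of elements;
   recvlen[i] and displ[i], needed on the root, give the count contributed by rank i
   and its starting offset in the receive buffer. Sequential case shown:
   \code
   Dune::CollectiveCommunication<Dune::No_Comm> cc;
   int mine[3]    = {1, 2, 3};
   int recvlen[1] = {3};            // one entry per process
   int displ[1]   = {0};
   int out[3];
   cc.gatherv(mine, 3, out, recvlen, displ, 0);
   \endcode
 */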
/** @brief Scatter an array from a root to all other tasks.
*
* The root process sends the elements with index from k*len to (k+1)*len-1 in its array to
* task k, which stores them at index 0 to len-1.
* @param[in] send The array to scatter. Might have length zero on non-root
* tasks.
* @param[out] recv The buffer to store the received data in. Upon completion of the
* method, each task will have the same data stored there as in the
* send buffer of the root task before.
* @param[in] len The number of elements in the recv buffer.
* @param[in] root The root task that scatters the data.
* @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename T>
int scatter (T* send, T* recv, int len, int root) const // note out must have same size as in
{
for (int i=0; i<len; i++)
recv[i] = send[i];
return 0;
}
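/* Sketch for scatter(): the root holds size() * len elements and rank k receives
   the block [k*len, (k+1)*len); in the sequential default the single block is
   copied directly.
   \code
   Dune::CollectiveCommunication<Dune::No_Comm> cc;
   double send[2] = {1.0, 2.0};     // size() * len entries on the root
   double recv[2];
   cc.scatter(send, recv, 2, 0);    // recv == {1.0, 2.0} on the single rank
   \endcode
 */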
/** @brief Scatter arrays of variable length from a root to all other tasks.
*
* The root process sends the elements with index from displ[k] to displ[k]+sendlen[k]-1 in
* its array to task k, which stores them at index 0 to recvlen-1.
* @param[in] send The array to scatter. May have length zero on non-root
* tasks.
* @param[in] sendlen An array with size equal to the number of processes containing the number
* of elements to scatter to process i at position i, i.e. the number that
* is passed as recvlen argument to this function in process i.
* @param[in] displ An array with size equal to the number of processes. Data scattered to
* process i will be read starting at send+displ[i] on the root process.
* @param[out] recv The buffer to store the received data in. Upon completion of the
* method, each task will have the same data stored there as in the
* send buffer of the root task before.
* @param[in] recvlen The number of elements in the recv buffer.
* @param[in] root The root task that scatters the data.
* @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename T>
int scatterv (T* send, int* sendlen, int* displ, T* recv, int recvlen, int root) const
{
for (int i=*displ; i<*sendlen; i++)
recv[i] = send[i];
return 0;
}
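/* Sketch for scatterv(): sendlen[i] and displ[i] on the root give the count sent
   to rank i and its starting offset in the send buffer. Sequential case shown:
   \code
   Dune::CollectiveCommunication<Dune::No_Comm> cc;
   double send[3] = {1.0, 2.0, 3.0};
   int sendlen[1] = {3};
   int displ[1]   = {0};
   double recv[3];
   cc.scatterv(send, sendlen, displ, recv, 3, 0);
   \endcode
 */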
/**
* @brief Gathers data from all tasks and distribute it to all.
*
* The block of data sent from the jth process is received by every
* process and placed in the jth block of the buffer rbuf.
*
* @param[in] sbuf The buffer with the data to send. Has to be the same for
* each task.
* @param[in] count The number of elements to send on each task.
* @param[out] rbuf The receive buffer for the data. Has to be of size
* notasks*count, with notasks being the number of tasks in the communicator.
* @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename T>
int allgather(T* sbuf, int count, T* rbuf) const
{
for(T* end=sbuf+count; sbuf < end; ++sbuf, ++rbuf)
*rbuf=*sbuf;
return 0;
}
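/* Sketch for allgather(): like gather(), but every rank (not only the root) ends up
   with the concatenated data, so every rank needs a receive buffer of
   size() * count entries.
   \code
   Dune::CollectiveCommunication<Dune::No_Comm> cc;
   int mine = cc.rank();
   int all[1];                      // size() * 1 entries; 1 here
   cc.allgather(&mine, 1, all);     // all[i] == i on every rank
   \endcode
 */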
/**
* @brief Gathers data of variable length from all tasks and distribute it to all.
*
* The block of data sent from the jth process is received by every
* process and placed in the jth block of the buffer out.
*
* @param[in] in The send buffer with the data to send.
* @param[in] sendlen The number of elements to send on each task.
* @param[out] out The buffer to store the received data in.
* @param[in] recvlen An array with size equal to the number of processes containing the number
* of elements to receive from process i at position i, i.e. the number that
* is passed as sendlen argument to this function in process i.
* @param[in] displ An array with size equal to the number of processes. Data received from
* process i will be written starting at out+displ[i].
* @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename T>
int allgatherv (T* in, int sendlen, T* out, int* recvlen, int* displ) const
{
for (int i=*displ; i<sendlen; i++)
out[i] = in[i];
return 0;
}
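/* Sketch for allgatherv(): like gatherv(), but every rank receives the concatenated
   data, so recvlen and displ are needed on every rank. Sequential case shown:
   \code
   Dune::CollectiveCommunication<Dune::No_Comm> cc;
   double mine[2] = {1.0, 2.0};
   int recvlen[1] = {2};
   int displ[1]   = {0};
   double all[2];
   cc.allgatherv(mine, 2, all, recvlen, displ);
   \endcode
 */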
/**
* @brief Compute something over all processes
* for each component of an array and return the result
* in every process.
*
* The template parameter BinaryFunction is the type of
* the binary function to use for the computation
*
* @param inout The array to compute on.
* @param len The number of components in the array
* @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename BinaryFunction, typename Type>
int allreduce(Type* inout, int len) const
{
return 0;
}
/**
* @brief Compute something over all processes
* for each component of an array and return the result
* in every process.
*
* The template parameter BinaryFunction is the type of
* the binary function to use for the computation
*
* @param in The array to compute on.
* @param out The array to store the results in.
* @param len The number of components in the array
* @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
*/
template<typename BinaryFunction, typename Type>
void allreduce(Type* in, Type* out, int len) const
{
std::copy(in, in+len, out);
return;
}
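/* Sketch for allreduce(): the reduction is chosen via the BinaryFunction template
   parameter; binaryfunctions.hh (included above) provides Dune::Min and Dune::Max,
   and standard functors such as std::plus (from <functional>) can be used as well.
   \code
   Dune::CollectiveCommunication<Dune::No_Comm> cc;
   double vals[2] = {1.0, 4.0};
   cc.allreduce<Dune::Max<double> >(vals, 2);     // in-place variant
   double in[2] = {1.0, 4.0}, out[2];
   cc.allreduce<std::plus<double> >(in, out, 2);  // separate in/out buffers
   \endcode
 */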
};
}
#endif