/usr/include/palabos/parallelism/mpiManager.h is provided by the libplb-dev package, version 1.5~r1+repack1-2build2.

The file is owned by root:root, with mode 0o644.

The header declares plb::global::MpiManager, a thin wrapper around common MPI calls, accessed through the singleton function plb::global::mpi(); when PLB_MPI_PARALLEL is not defined, a serial stub with a compatible interface takes its place. The contents of the file are shown below.

/* This file is part of the Palabos library.
 *
 * Copyright (C) 2011-2015 FlowKit Sarl
 * Route d'Oron 2
 * 1010 Lausanne, Switzerland
 * E-mail contact: contact@flowkit.com
 *
 * The most recent release of Palabos can be downloaded at 
 * <http://www.palabos.org/>
 *
 * The library Palabos is free software: you can redistribute it and/or
 * modify it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * The library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/** \file
 * Wrapper functions that simplify the use of MPI
 */

#ifndef MPI_MANAGER_H
#define MPI_MANAGER_H

#include "core/globalDefs.h"

#include <string>

#ifdef PLB_MPI_PARALLEL
#include "mpi.h"
#include <vector>
#endif


namespace plb {

namespace global {

#ifdef PLB_MPI_PARALLEL

/// Wrapper functions that simplify the use of MPI
class MpiManager {
public:
    /// Initializes the MPI manager and the MPI machine.
    void init(int *argc, char ***argv, bool verbous=false);
    /// Initializes the MPI manager, but assumes that the MPI
    ///   machine is handled by another instance.
    void init(MPI_Comm globalCommunicator_);
    /// Initializes the MPI manager, but assumes that the MPI
    ///   machine is handled by another instance.
    void init();
    /// Returns the number of processes
    int getSize() const;
    /// Returns the process ID
    int getRank() const;
    /// Returns process ID of main processor
    int bossId() const;
    /// Tells whether current processor is main processor
    bool isMainProcessor() const;
    /// Returns universal MPI-time in seconds
    double getTime() const;
    /// Returns the global communicator for this program or library instance.
    MPI_Comm getGlobalCommunicator() const;

    /// Synchronizes the processes
    void barrier();

    /// Sends data at *buf, blocking
    template <typename T>
    void send( T *buf, int count, int dest, int tag = 0 );

    /// Sends data at *buf, non-blocking
    template <typename T>
    void iSend( T *buf, int count, int dest, MPI_Request* request, int tag = 0 );

    /// Sends data at *buf, assuming that receiver is ready.
    template <typename T>
    void rSend( T *buf, int count, int dest, int tag = 0 );

    /// Sends data at *buf, non-blocking; the request is freed automatically
    template <typename T>
    void iSendRequestFree( T *buf, int count, int dest, int tag = 0 );

    /// Receives data at *buf, blocking
    template <typename T>
    void receive( T *buf, int count, int source, int tag = 0 );

    /// Receives data at *buf, non-blocking
    template <typename T>
    void iRecv( T *buf, int count, int source, MPI_Request* request, int tag = 0 );

    /// Send and receive data between two partners
    template <typename T>
    void sendRecv( T *sendBuf, T *recvBuf, int count, int dest,
                   int source, int tag = 0 );

    /// Sends data to master processor
    template <typename T>
    void sendToMaster( T* sendBuf, int sendCount, bool iAmRoot );

    /// Scatter data from one processor over multiple processors
    template <typename T>
    void scatterV( T *sendBuf, T *recvBuf, int* sendCounts, int root = 0 );

    /// Gather data from multiple processors to one processor
    template <typename T>
    void gatherV( T* sendBuf, T* recvBuf, int *recvCounts, int root = 0 );

    /// Broadcast data from one processor to multiple processors
    template <typename T>
    void bCast( T* sendBuf, int sendCount, int root = 0 );
    
    /// Special case for broadcasting strings. Memory handling is automatic.
    void bCast( std::string& message, int root = 0 );

    /// Broadcast data when root is unknown to other processors
    template <typename T>
    void bCastThroughMaster( T* sendBuf, int sendCount, bool iAmRoot );

    /// Reduction operation toward one processor
    template <typename T>
    void reduce( T sendVal, T& recvVal, MPI_Op op, int root = 0 );

    /// Element-per-element reduction of a vector of data
    template <typename T>
    void reduceVect( std::vector<T>& sendVal, std::vector<T>& recvVal,
                     MPI_Op op, int root = 0 );

    /// In-place element-per-element reduction of a vector of data; the
    ///   result is available on all MPI processes.
    template <typename T>
    void allReduceVect( std::vector<T>& sendRecvVal, MPI_Op op );

    /// Reduction operation, followed by a broadcast
    template <typename T>
    void reduceAndBcast(T& reductVal, MPI_Op op, int root = 0 );

    /// Complete a non-blocking MPI operation
    void wait(MPI_Request* request, MPI_Status* status);

private:
    /// Implementation code for Scatter
    template <typename T>
    void scatterv_impl(T *sendBuf, int* sendCounts, int* displs,
                       T* recvBuf, int recvCount, int root);

    /// Implementation code for Gather
    template <typename T>
    void gatherv_impl(T* sendBuf, int sendCount, T* recvBuf, int* recvCounts,
                      int* displs, int root);
private:
    MpiManager();
    ~MpiManager();
private:
    int numTasks, taskId;
    bool ok;
    bool responsibleForMpiMachine;
    MPI_Comm globalCommunicator;

friend MpiManager& mpi();
};

#else  // #ifdef PLB_MPI_PARALLEL

class MpiManager {
public:
    /// Initializes the MPI manager
    void init(int *argc, char ***argv, bool verbous=false) { }
    /// Initializes the MPI manager, but assumes that the MPI
    ///   machine is handled by another instance.
    void init() { }
    /// Returns the number of processes
    int getSize() const { return 1; }
    /// Returns the process ID
    int getRank() const { return 0; }
    /// Returns process ID of main processor
    int bossId() const { return 0; }
    /// Tells whether current processor is main processor
    bool isMainProcessor() const { return true; }
    /// Broadcast data from one processor to multiple processors
    template <typename T>
    void bCast(T* sendBuf, int sendCount, int root = 0) { }
    /// Special case for broadcasting strings. Memory handling is automatic.
    void bCast(std::string& message, int root = 0) { }
    /// Synchronizes the processes
    void barrier() { }

friend MpiManager& mpi();
};

#endif  // PLB_MPI_PARALLEL

inline MpiManager& mpi() {
    static MpiManager instance;
    return instance;
}

}  // namespace global

}  // namespace plb


#endif  // MPI_MANAGER_H
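
For orientation, here is a minimal usage sketch of the wrapper declared above. It is not part of the packaged file; it assumes a Palabos build with PLB_MPI_PARALLEL defined, compilation with an MPI compiler wrapper such as mpicxx, an include path that resolves parallelism/mpiManager.h (e.g. -I/usr/include/palabos), and a launch under mpirun. The file name and payload values are illustrative only.

// Minimal sketch (not part of mpiManager.h): broadcast a string from the
// main processor and compute a global sum with reduceAndBcast().
#include "parallelism/mpiManager.h"
#include <iostream>
#include <string>

int main(int argc, char* argv[])
{
    // Start the MPI machine; the manager records that it is responsible
    // for it and finalizes it when the singleton is destroyed at exit.
    plb::global::mpi().init(&argc, &argv);

    // Every process learns its rank and the total process count.
    int rank = plb::global::mpi().getRank();
    int numProcs = plb::global::mpi().getSize();

    // Broadcast a string from the main processor; memory handling is
    // automatic for the std::string overload of bCast().
    std::string message;
    if (plb::global::mpi().isMainProcessor()) {
        message = "hello from the boss";
    }
    plb::global::mpi().bCast(message, plb::global::mpi().bossId());

    // Sum the ranks over all processes, then broadcast the result so
    // that every process ends up holding the same total.
    double value = static_cast<double>(rank);
    plb::global::mpi().reduceAndBcast(value, MPI_SUM);

    if (plb::global::mpi().isMainProcessor()) {
        std::cout << numProcs << " processes, rank sum = "
                  << value << std::endl;
    }
    return 0;
}

Because mpi() returns a function-local static, the same MpiManager instance is shared across the whole program. Note that the serial stub above provides no-op versions of only a subset of this interface (init, bCast, barrier, and the rank queries), so code restricted to those calls also compiles without MPI; the reduceAndBcast() call in this sketch requires the parallel build.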