This file is indexed.

/usr/include/dolfin/mesh/MeshPartitioning.h is in libdolfin-dev 2017.2.0.post0-2.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

// Copyright (C) 2008-2013 Niclas Jansson, Ola Skavhaug, Anders Logg,
// Garth N. Wells and Chris Richardson
//
// This file is part of DOLFIN.
//
// DOLFIN is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// DOLFIN is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
//
// Modified by Garth N. Wells, 2010
// Modified by Kent-Andre Mardal, 2011
// Modified by Chris Richardson, 2013
//
// First added:  2008-12-01
// Last changed: 2014-06-20

#ifndef __MESH_PARTITIONING_H
#define __MESH_PARTITIONING_H

#include <cstdint>
#include <map>
#include <utility>
#include <vector>
#include <boost/multi_array.hpp>
#include <dolfin/log/log.h>
#include <dolfin/common/Set.h>
#include "CellType.h"
#include "DistributedMeshTools.h"
#include "LocalMeshValueCollection.h"
#include "Mesh.h"


namespace dolfin
{
  // Developer note: MeshFunction and MeshValueCollection cannot
  // appear in the implementations of the templated functions in this
  // file, as this leads to a circular dependency. Therefore the
  // functions are templated over these types.

  template <typename T> class MeshFunction;
  template <typename T> class MeshValueCollection;
  class LocalMeshData;

  /// This class partitions and distributes a mesh based on
  /// partitioned local mesh data. The local mesh data will also be
  /// repartitioned and redistributed during the computation of the
  /// mesh partitioning.
  ///
  /// After partitioning, each process has a local mesh and some data
  /// that couples the meshes together.

  class MeshPartitioning
  {
  public:

    /// Build a distributed mesh from a local mesh on process 0
    static void build_distributed_mesh(Mesh& mesh);

    /// Build a distributed mesh from a local mesh on process 0, with
    /// distribution of cells supplied (destination processes for each
    /// cell)
    static void
      build_distributed_mesh(Mesh& mesh, const std::vector<int>& cell_partition,
                             const std::string ghost_mode);

    /// Build a distributed mesh from 'local mesh data' that is
    /// distributed across processes
    static void build_distributed_mesh(Mesh& mesh, const LocalMeshData& data,
                                       const std::string ghost_mode);

    /// Build a MeshValueCollection based on LocalMeshValueCollection
    template<typename T>
      static void
      build_distributed_value_collection(MeshValueCollection<T>& values,
                                const LocalMeshValueCollection<T>& local_data,
                                const Mesh& mesh);

  private:

    // Compute cell partitioning from local mesh data. Returns a
    // vector 'cell -> process' vector for cells in LocalMeshData, and
    // a map 'local cell index -> processes' to which ghost cells must
    // be sent
    static
    void partition_cells(const MPI_Comm& mpi_comm,
                         const LocalMeshData& mesh_data,
                         const std::string partitioner,
                         std::vector<int>& cell_partition,
                         std::map<std::int64_t, std::vector<int>>& ghost_procs);

    // Build a distributed mesh from local mesh data with a computed
    // partition
    static void build(Mesh& mesh, const LocalMeshData& data,
                      const std::vector<int>& cell_partition,
                      const std::map<std::int64_t, std::vector<int>>& ghost_procs,
                      const std::string ghost_mode);

    // FIXME: Improve this docstring
    // Distribute a layer of cells attached by vertex to the boundary,
    // updating new_mesh_data and shared_cells. Used when ghosting by vertex.
    static
    void distribute_cell_layer(MPI_Comm mpi_comm,
                               const int num_regular_cells,
                               const std::int64_t num_global_vertices,
                               std::map<std::int32_t, std::set<unsigned int>>& shared_cells,
                               boost::multi_array<std::int64_t, 2>& cell_vertices,
                               std::vector<std::int64_t>& global_cell_indices,
                               std::vector<int>& cell_partition);

    // FIXME: make clearer what goes in and what comes out
    // Reorder cells by Gibbs-Poole-Stockmeyer algorithm (via SCOTCH). Returns
    // the tuple (new_shared_cells, new_cell_vertices, new_global_cell_indices).
    static
    void reorder_cells_gps(MPI_Comm mpi_comm,
     const unsigned int num_regular_cells,
     const CellType& cell_type,
     const std::map<std::int32_t, std::set<unsigned int>>& shared_cells,
     const boost::multi_array<std::int64_t, 2>& cell_vertices,
     const std::vector<std::int64_t>& global_cell_indices,
     std::map<std::int32_t, std::set<unsigned int>>& reordered_shared_cells,
     boost::multi_array<std::int64_t, 2>& reordered_cell_vertices,
     std::vector<std::int64_t>& reordered_global_cell_indices);

    // FIXME: make clearer what goes in and what comes out
    // Reorder vertices by Gibbs-Poole-Stockmeyer algorithm (via SCOTCH).
    // Returns the pair (new_vertex_indices, new_vertex_global_to_local).
    static
    void
    reorder_vertices_gps(MPI_Comm mpi_comm,
     const std::int32_t num_regular_vertices,
     const std::int32_t num_regular_cells,
     const int  num_cell_vertices,
     const boost::multi_array<std::int64_t, 2>& cell_vertices,
     const std::vector<std::int64_t>& vertex_indices,
     const std::map<std::int64_t, std::int32_t>& vertex_global_to_local,
     std::vector<std::int64_t>& reordered_vertex_indices,
     std::map<std::int64_t, std::int32_t>& reordered_vertex_global_to_local);

    // FIXME: Update, making clear exactly what is computed
    // This function takes the partition computed by the partitioner
    // (which tells us to which process each of the local cells stored in
    // LocalMeshData on this process belongs) and sends the cells
    // to the appropriate owning process. Ghost cells are also sent,
    // along with the list of sharing processes.
    // A new LocalMeshData object is populated with the redistributed
    // cells. Return the number of non-ghost cells on this process.
    static
    std::int32_t
      distribute_cells(const MPI_Comm mpi_comm,
        const LocalMeshData& data,
        const std::vector<int>& cell_partition,
        const std::map<std::int64_t, std::vector<int>>& ghost_procs,
        boost::multi_array<std::int64_t, 2>& new_cell_vertices,
        std::vector<std::int64_t>& new_global_cell_indices,
        std::vector<int>& new_cell_partition,
        std::map<std::int32_t, std::set<unsigned int>>& shared_cells);

    // FIXME: Improve explanation
    // Utility to convert received_vertex_indices into
    // vertex sharing information
    static void build_shared_vertices(MPI_Comm mpi_comm,
     std::map<std::int32_t, std::set<unsigned int>>& shared_vertices,
     const std::map<std::int64_t, std::int32_t>& vertex_global_to_local_indices,
     const std::vector<std::vector<std::size_t>>& received_vertex_indices);

    // FIXME: make clear what is computed
    // Distribute vertices and vertex sharing information
    static void
      distribute_vertices(const MPI_Comm mpi_comm,
        const LocalMeshData& mesh_data,
        const std::vector<std::int64_t>& vertex_indices,
        boost::multi_array<double, 2>& new_vertex_coordinates,
        std::map<std::int64_t, std::int32_t>& vertex_global_to_local_indices,
        std::map<std::int32_t, std::set<unsigned int>>& shared_vertices_local);

    // Compute the local->global and global->local maps for all local vertices
    // on this process, from the global vertex indices on each local cell.
    // Returns the number of regular (non-ghosted) vertices.
    static std::int32_t compute_vertex_mapping(MPI_Comm mpi_comm,
                  const std::int32_t num_regular_cells,
                  const boost::multi_array<std::int64_t, 2>& cell_vertices,
                  std::vector<std::int64_t>& vertex_indices,
                  std::map<std::int64_t, std::int32_t>& vertex_global_to_local);

    // FIXME: Improve pre-conditions explanation
    // Build mesh
    static void build_local_mesh(Mesh& mesh,
      const std::vector<std::int64_t>& global_cell_indices,
      const boost::multi_array<std::int64_t, 2>& cell_global_vertices,
      const CellType::Type cell_type,
      const int tdim,
      const std::int64_t num_global_cells,
      const std::vector<std::int64_t>& vertex_indices,
      const boost::multi_array<double, 2>& vertex_coordinates,
      const int gdim,
      const std::int64_t num_global_vertices,
      const std::map<std::int64_t, std::int32_t>& vertex_global_to_local_indices);

    // Create and attach distributed MeshDomains from local_data
    static void build_mesh_domains(Mesh& mesh, const LocalMeshData& local_data);

    // Create and attach distributed MeshDomains from local_data
    // [entry, (cell_index, local_index, value)]
    template<typename T, typename MeshValueCollection>
    static void build_mesh_value_collection(const Mesh& mesh,
      const std::vector<std::pair<std::pair<std::size_t, std::size_t>, T>>& local_value_data,
      MeshValueCollection& mesh_values);
  };
  //---------------------------------------------------------------------------
  template<typename T>
  void MeshPartitioning::build_distributed_value_collection(MeshValueCollection<T>& values,
             const LocalMeshValueCollection<T>& local_data, const Mesh& mesh)
  {
    // Extract data
    const std::vector<std::pair<std::pair<std::size_t, std::size_t>, T>>& local_values
      = local_data.values();

    // Build MeshValueCollection from local data
    build_mesh_value_collection(mesh, local_values, values);
  }
  //---------------------------------------------------------------------------
  template<typename T, typename MeshValueCollection>
  void MeshPartitioning::build_mesh_value_collection(const Mesh& mesh,
    const std::vector<std::pair<std::pair<std::size_t, std::size_t>, T>>& local_value_data,
    MeshValueCollection& mesh_values)
  {
    // Get MPI communicator
    const MPI_Comm mpi_comm = mesh.mpi_comm();

    // Get topological dimensions
    const std::size_t D = mesh.topology().dim();
    const std::size_t dim = mesh_values.dim();
    mesh.init(dim);

    // This is required for old-style mesh data that uses (cell index,
    // local entity index)
    mesh.init(dim, D);

    // Clear MeshValueCollection values
    mesh_values.clear();

    // Initialise global entity numbering
    DistributedMeshTools::number_entities(mesh, dim);

    // Get mesh value collection used for marking
    MeshValueCollection& markers = mesh_values;

    // Get local mesh data for domains
    const std::vector< std::pair<std::pair<std::size_t, std::size_t>, T>>&
      ldata = local_value_data;

    // Get local local-to-global map
    if (!mesh.topology().have_global_indices(D))
    {
      dolfin_error("MeshPartitioning.h",
                   "build mesh value collection",
                   "Do not have have_global_entity_indices");
    }

    // Get global indices on local process
    const auto& global_entity_indices = mesh.topology().global_indices(D);

    // Add local (to this process) data to domain marker
    std::vector<std::size_t> off_process_global_cell_entities;

    // Build and populate a local map for global_entity_indices
    std::map<std::size_t, std::size_t> map_of_global_entity_indices;
    for (std::size_t i = 0; i < global_entity_indices.size(); i++)
      map_of_global_entity_indices[global_entity_indices[i]] = i;

    for (std::size_t i = 0; i < ldata.size(); ++i)
    {
      const std::map<std::int32_t, std::set<unsigned int>>& sharing_map
        = mesh.topology().shared_entities(D);

      const std::size_t global_cell_index = ldata[i].first.first;
      std::map<std::size_t, std::size_t>::const_iterator data
        = map_of_global_entity_indices.find(global_cell_index);
      if (data != map_of_global_entity_indices.end())
      {
        const std::size_t local_cell_index = data->second;
        const std::size_t entity_local_index = ldata[i].first.second;
        const T value = ldata[i].second;
        markers.set_value(local_cell_index, entity_local_index, value);

        // If shared with other processes, add to off process list
        if (sharing_map.find(local_cell_index) != sharing_map.end())
          off_process_global_cell_entities.push_back(global_cell_index);
      }
      else
        off_process_global_cell_entities.push_back(global_cell_index);
    }

    // Get destinations and local cell index at destination for
    // off-process cells
    const std::map<std::size_t, std::set<std::pair<std::size_t, std::size_t>>>
      entity_hosts
      = DistributedMeshTools::locate_off_process_entities(off_process_global_cell_entities,
                                                          D, mesh);

    // Number of MPI processes
    const std::size_t num_processes = MPI::size(mpi_comm);

    // Pack data to send to appropriate process
    std::vector<std::vector<std::size_t>> send_data0(num_processes);
    std::vector<std::vector<T>> send_data1(num_processes);
    std::map<std::size_t, std::set<std::pair<std::size_t, std::size_t>>>::const_iterator entity_host;

    {
      // Build a convenience map in order to speedup the loop over
      // local data
      std::map<std::size_t, std::set<std::size_t>> map_of_ldata;
      for (std::size_t i = 0; i < ldata.size(); ++i)
        map_of_ldata[ldata[i].first.first].insert(i);

      for (entity_host = entity_hosts.begin(); entity_host != entity_hosts.end();
           ++entity_host)
      {
        const std::size_t host_global_cell_index = entity_host->first;
        const std::set<std::pair<std::size_t, std::size_t>>& processes_data
          = entity_host->second;

        // Loop over local data
        std::map<std::size_t, std::set<std::size_t>>::const_iterator ldata_it
          = map_of_ldata.find(host_global_cell_index);
        if (ldata_it != map_of_ldata.end())
        {
          for (std::set<std::size_t>::const_iterator it = ldata_it->second.begin();
               it != ldata_it->second.end(); it++)
          {
            const std::size_t local_entity_index = ldata[*it].first.second;
            const T domain_value = ldata[*it].second;

            std::set<std::pair<std::size_t, std::size_t>>::const_iterator process_data;
            for (process_data = processes_data.begin();
                 process_data != processes_data.end(); ++process_data)
            {
              const std::size_t proc = process_data->first;
              const std::size_t local_cell_entity = process_data->second;
              send_data0[proc].push_back(local_cell_entity);
              send_data0[proc].push_back(local_entity_index);
              send_data1[proc].push_back(domain_value);
            }
          }
        }
      }
    }

    // Send/receive data
    std::vector<std::size_t> received_data0;
    std::vector<T> received_data1;
    MPI::all_to_all(mpi_comm, send_data0, received_data0);
    MPI::all_to_all(mpi_comm, send_data1, received_data1);
    dolfin_assert(2*received_data1.size() == received_data0.size());

    // Add received data to mesh domain
    for (std::size_t i = 0; i < received_data1.size(); ++i)
    {
      const std::size_t local_cell_entity = received_data0[2*i];
      const std::size_t local_entity_index = received_data0[2*i + 1];
      const T value = received_data1[i];
      dolfin_assert(local_cell_entity < mesh.num_cells());
      markers.set_value(local_cell_entity, local_entity_index, value);
    }

  }
  //---------------------------------------------------------------------------

}

#endif
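
For orientation, here is a minimal usage sketch; it is not part of the packaged header. It assumes a program that has populated an undistributed Mesh on process 0 (for example with MeshEditor) and wants to spread it across all MPI ranks; DOLFIN's own mesh constructors and readers normally call build_distributed_mesh internally, so a direct call like this is only needed for custom mesh construction.

// Hypothetical driver program, sketched against the declarations above.
#include <dolfin.h>

int main(int argc, char* argv[])
{
  // Initialise MPI and the other DOLFIN sub-systems.
  dolfin::init(argc, argv);

  // Assumed input: a mesh populated on process 0 only (e.g. via MeshEditor)
  // and left empty on the remaining ranks.
  dolfin::Mesh mesh(MPI_COMM_WORLD);
  // ... fill `mesh` on rank 0 ...

  // Partition the cells and redistribute them; afterwards each rank holds a
  // local mesh plus the shared-entity data that couples the pieces together.
  dolfin::MeshPartitioning::build_distributed_mesh(mesh);

  return 0;
}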