/usr/include/palabos/multiBlock/localMultiBlockInfo2D.h is in libplb-dev 1.5~r1+repack1-2build2.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
/* This file is part of the Palabos library.
*
* Copyright (C) 2011-2015 FlowKit Sarl
* Route d'Oron 2
* 1010 Lausanne, Switzerland
* E-mail contact: contact@flowkit.com
*
* The most recent release of Palabos can be downloaded at
* <http://www.palabos.org/>
*
* The library Palabos is free software: you can redistribute it and/or
* modify it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* The library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/** \file
* Geometry specifications for 2D multiblocks -- header file.
*/
#ifndef LOCAL_MULTI_BLOCK_INFO_2D_H
#define LOCAL_MULTI_BLOCK_INFO_2D_H
#include "core/globalDefs.h"
#include "core/geometry2D.h"
#include "multiBlock/threadAttribution.h"
#include "multiBlock/sparseBlockStructure2D.h"
#include <vector>
namespace plb {
/// Hold extra information on the blocks which are local to the current MPI thread;
/// for example, overlaps with adjacent blocks.
class Overlap2D {
public:
    /// Create an overlap whose region coincides on the original and on the
    /// overlapping block (the usual, non-periodic situation).
    Overlap2D(plint originalId_, plint overlapId_, Box2D const& intersection_)
        : idOriginal(originalId_),
          idOverlap(overlapId_),
          regionOriginal(intersection_),
          regionOverlap(intersection_)
    { }
    /// Create an overlap (typically periodic) whose region on the overlapping
    /// block is the original region shifted by (-shiftX, -shiftY).
    Overlap2D(plint originalId_, plint overlapId_,
              Box2D const& originalRegion_,
              plint shiftX, plint shiftY)
        : idOriginal(originalId_),
          idOverlap(overlapId_),
          regionOriginal(originalRegion_),
          // regionOriginal is initialized first (declaration order), so it is
          // safe to read it here.
          regionOverlap(regionOriginal.shift(-shiftX, -shiftY))
    { }
    /// ID of the block which holds the original data.
    plint getOriginalId() const { return idOriginal; }
    /// ID of the block which holds the overlapping data.
    plint getOverlapId()  const { return idOverlap; }
    /// Region (in absolute coordinates) on the original block.
    Box2D const& getOriginalCoordinates() const { return regionOriginal; }
    /// Region (in absolute coordinates) on the overlapping block.
    /** This is usually identical with the region on the original block. An
     *  exception are periodic overlaps, in which regions on opposite ends
     *  of the block are brought into relation.
     **/
    Box2D const& getOverlapCoordinates() const { return regionOverlap; }
    /// X-offset between the original and the overlapping region.
    plint getShiftX() const { return regionOriginal.x0 - regionOverlap.x0; }
    /// Y-offset between the original and the overlapping region.
    plint getShiftY() const { return regionOriginal.y0 - regionOverlap.y0; }
private:
    plint idOriginal, idOverlap;
    Box2D regionOriginal, regionOverlap;
};
/// Define a global ordering for overlaps.
/** This can be used for example to guarantee that the MPI communication
 *  between a pair of processes is executed in the same order, and the
 *  communications don't cross. Overlaps are compared lexicographically:
 *  first by original ID, then by overlap ID, then by the original region.
 **/
inline bool operator<(Overlap2D const& overlap1, Overlap2D const& overlap2)
{
    if (overlap1.getOriginalId() != overlap2.getOriginalId()) {
        return overlap1.getOriginalId() < overlap2.getOriginalId();
    }
    if (overlap1.getOverlapId() != overlap2.getOverlapId()) {
        return overlap1.getOverlapId() < overlap2.getOverlapId();
    }
    return overlap1.getOriginalCoordinates() < overlap2.getOriginalCoordinates();
}
/// This structure holds both overlap information and orientation of the boundary.
/** In case of periodic overlaps, it is important to know the orientation of the
 *  boundary, additionally to the coordinates of the overlap region. This is
 *  required when the communication step within a multi block is executed. Given
 *  that the user can selectively switch on/off periodicity, the multi block
 *  must be able to decide which periodic overlaps to communicate and which not.
 */
struct PeriodicOverlap2D {
    /// Constructor; defined in the corresponding source file.
    PeriodicOverlap2D(Overlap2D const& overlap_, plint normalX_, plint normalY_);
    Overlap2D overlap;  ///< The overlap region proper.
    plint normalX;      ///< X-component of the boundary orientation (see class comment).
    plint normalY;      ///< Y-component of the boundary orientation (see class comment).
};
/// Define a global ordering for periodic overlaps.
/** Periodic overlaps compare exactly like their embedded plain overlaps;
 *  the boundary orientation (normalX/normalY) takes no part in the ordering.
 **/
inline bool operator<( PeriodicOverlap2D const& lhs,
                       PeriodicOverlap2D const& rhs )
{
    return lhs.overlap < rhs.overlap;
}
/// Determine pairs of domains associated to a data transfer between two blocks.
/** Declared here, implemented in the corresponding source file; each returned
 *  Overlap2D presumably pairs a region of block1 with the matching region of
 *  block2 — confirm against the implementation.
 **/
std::vector<Overlap2D> copyAllDataTransfer (
    SparseBlockStructure2D const& block1,
    SparseBlockStructure2D const& block2 );
/// Determine pairs of domains associated to a data transfer between domains on two blocks.
/** It is assumed that the two domains have the same extent.
 **/
std::vector<Overlap2D> copyDomainDataTransfer (
    SparseBlockStructure2D const& block1, Box2D block1Domain,
    SparseBlockStructure2D const& block2, Box2D block2Domain );
/// Hold, for one sparse block structure, the information relevant to the
/// current MPI process: the IDs of the local blocks, plus the normal and
/// periodic overlaps that involve local data. All members are declared here
/// and defined in the corresponding source file.
class LocalMultiBlockInfo2D {
public:
    /// Precompute local blocks and overlaps for the given sparse block
    /// structure, thread attribution, and envelope width.
    LocalMultiBlockInfo2D( SparseBlockStructure2D const& sparseBlock,
                           ThreadAttribution const& attribution,
                           plint envelopeWidth_ );
    /// Index of all blocks local to current processor
    std::vector<plint> const& getBlocks() const;
    /// Index of all overlaps for which original or overlap data are on current processor
    std::vector<Overlap2D> const& getNormalOverlaps() const;
    /// Index of all periodic overlaps for which original or overlap data are
    /// on current processor.
    std::vector<PeriodicOverlap2D> const& getPeriodicOverlaps() const;
    /// Index of all periodic overlaps for which overlap data are on current processor
    std::vector<PeriodicOverlap2D> const& getPeriodicOverlapWithRemoteData() const;
    /// Exchange the full state of this object with rhs.
    void swap(LocalMultiBlockInfo2D& rhs);
private:
    /// Determine all blocks which are associated to the current MPI thread.
    void computeMyBlocks(SparseBlockStructure2D const& sparseBlock,
                         ThreadAttribution const& attribution);
    /// Compute normal overlaps for all local blocks.
    void computeAllNormalOverlaps(SparseBlockStructure2D const& sparseBlock);
    /// Compute normal overlaps for one local block.
    void computeNormalOverlaps(SparseBlockStructure2D const& sparseBlock, plint blockId);
    /// Compute periodic overlaps for all local blocks.
    void computeAllPeriodicOverlaps(SparseBlockStructure2D const& sparseBlock);
    /// Compute periodic overlaps for one local block.
    void computePeriodicOverlaps(SparseBlockStructure2D const& sparseBlock, plint blockId);
private:
    /// Envelope width as passed to the constructor.
    plint envelopeWidth;
    /// IDs of the blocks attributed to the current process.
    std::vector<plint> myBlocks;
    std::vector<Overlap2D> normalOverlaps;
    std::vector<PeriodicOverlap2D> periodicOverlaps;
    /// Subset of periodic overlaps whose overlap data live on a remote
    /// process. (NOTE(review): inferred from the name and getter; confirm
    /// against the implementation.)
    std::vector<PeriodicOverlap2D> periodicOverlapWithRemoteData;
};
} // namespace plb
#endif // LOCAL_MULTI_BLOCK_INFO_2D_H
|