This file is indexed.

/usr/lib/python2.7/dist-packages/PySPH-1.0a4.dev0-py2.7-linux-x86_64.egg/pysph/parallel/parallel_utils.py is in python-pysph 0~20160514.git91867dc-4build1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
"""NNPS utility functions to work with Zoltan lists"""
import numpy

from pyzoltan.core.zoltan import get_zoltan_id_type_max
from pysph.base.particle_array import ParticleArray

UINT_MAX = get_zoltan_id_type_max()

def invert_export_lists(comm, exportProcs, recv_count):
    """Invert a given set of export indices.

    Parameters:
    ------------

    comm : mpi4py.MPI.Comm
        A valid MPI communicator

    exportProcs : IntArray
        A list of processors to send objects to

    recv_count : np.ndarray (out)
        Return array of length size which upon output, gives the number of
        objects to be received from a given processor.

    Given a list of objects that need to be exported to remote processors,
    the job of invert lists is to inform each processor the number of
    objects it will receive from other processors. This situation arises
    for example in the cell based partitioning in PySPH. From the cell
    export lists, we have a list of particle indices that need to be
    exported to remote neighbors.

    """
    # reset the recv_counts to 0
    recv_count[:] = 0

    # get the rank and size for the communicator
    size = comm.Get_size()
    rank = comm.Get_rank()

    # count the number of objects we need to send to each processor.
    # NOTE: fixed a NameError here -- this module imports `numpy`, not `np`.
    send_count = numpy.zeros(shape=size, dtype=numpy.uint32)
    numExport = exportProcs.length

    for i in range(numExport):
        pid = exportProcs[i]
        send_count[pid] += 1

    # receive buffer for the all-gather: row i (of length `size`) holds
    # processor i's send_count array.
    recvbuf = numpy.zeros(shape=size*size, dtype=numpy.uint32)

    # do an all gather to receive the data
    comm.Allgather(sendbuf=send_count, recvbuf=recvbuf)

    # the number of objects to receive from processor i is entry `rank`
    # of processor i's send_count row.
    for i in range(size):
        proc_send_count = recvbuf[i*size:(i+1)*size]
        recv_count[i] = proc_send_count[rank]

def count_recv_data(
    comm, recv, numImport, importProcs):
    """Count the data to be received from different processors.

    Parameters:
    -----------

    comm : mpi.Comm
        MPI communicator

    recv : dict
        Upon output, will contain keys corresponding to processors and
        values indicating number of objects to receive from that proc.
        Processors contributing zero objects are omitted.

    numImport : int
        Zoltan generated total number of objects to be imported
        to the calling proc

    importProcs : IntArray
        Zoltan generated list of source processors for each imported
        object.  (NOTE(review): the original docstring said DoubleArray,
        but the values are used as integer processor ranks.)

    """
    rank = comm.Get_rank()
    size = comm.Get_size()

    # start from a clean slate with a zero count for every processor
    recv.clear()
    for processor in range(size):
        recv[processor] = 0

    # tally one object per entry in the import list
    for i in range(numImport):
        processor = importProcs[i]
        recv[processor] += 1

    # drop processors that send us nothing.  Iterate over a snapshot of
    # the keys: deleting while iterating the live view raises
    # RuntimeError on Python 3.
    for processor in list(recv.keys()):
        if recv[processor] == 0:
            del recv[processor]

def get_send_data(
    comm, pa, lb_props, _exportIndices, _exportProcs):
    """Collect the data to send in a dictionary.

    Parameters:
    -----------

    comm : mpi.Comm
        MPI communicator

    pa : ParticleArray
        Reference to the particle array from where send data is gathered

    lb_props : list
        A list of prop names to collect data

    _exportIndices : UIntArray
        Zoltan generated list of local indices to export

    _exportProcs : IntArray
        Zoltan generated list of processors to export to

    Returns a dictionary of dictionaries 'send' keyed on processor id;
    each value maps prop names to the particle data destined for that
    processor, plus the special keys 'lid' (exported local indices) and
    'msglength' (number of exported particles).  Every rank in the
    communicator gets an entry, even when nothing is sent to it.

    """
    rank = comm.Get_rank()
    size = comm.Get_size()

    proc_array = _exportProcs.get_npy_array()
    index_array = _exportIndices.get_npy_array()

    # numpy views of the carrays for each requested property
    prop_arrays = dict(
        (prop, pa.get_carray(prop).get_npy_array()) for prop in lb_props
    )

    send = {}
    for pid in range(size):
        # positions in the export list bound for this processor
        selected = numpy.where(proc_array == pid)[0]
        local_ids = index_array[selected]

        payload = dict(
            (name, arr[local_ids]) for name, arr in prop_arrays.items()
        )

        # record which local particles were exported, and how many
        payload['lid'] = local_ids
        payload['msglength'] = local_ids.size

        send[pid] = payload

    return send

def Recv(comm, localbuf, recvbuf, source, localbufsize=0, tag=0):
    """MPI Receive operation

    Parameters:
    -----------

    comm : mpi.Comm
        The mpi communcator

    localbuf : CArray
        The local buffer to which the data is received in

    recvbuf : CArray
        the buffer in which to receive data from comm.Recv

    source : int
        processor from where the data originates

    localbufsize : int
        Current length index for the local buffer. Defaults to 0

    tag : int
        optional message tag

    When data arriving from several processors must be accumulated into
    one array (localbuf), each message is first received into 'recvbuf'
    and then copied into the slice starting at the running offset
    (localbufsize) and spanning the message length (recvbuf.length).

    """
    # message length is assumed known ahead of the actual receive
    nrecv = recvbuf.length

    # numpy views over the underlying C-arrays
    dest = localbuf.get_npy_array()
    staging = recvbuf.get_npy_array()

    # blocking receive into the staging buffer
    comm.Recv( buf=staging, source=source, tag=tag )

    # splice the message into the local buffer at the current offset;
    # with localbufsize == 0 this simply overwrites the front.
    dest[localbufsize:localbufsize + nrecv] = staging[:]

def get_particle_array(name="", **props):
    """Return a ParticleArray built from the given keyword properties.

    Parameters:
    -----------

    name : str
        Name for the returned ParticleArray.

    props : dict
        Keyword arguments mapping property names to array-like data.
        'pid', 'type' and 'tag' become int carrays, 'gid' an unsigned
        int carray, everything else a double carray.  The standard
        properties ('x', 'y', 'z', 'h', 'rho', 'gid', 'tag', 'type',
        'pid') are always created, defaulting to 0 ('gid' defaults to
        UINT_MAX, marking an unassigned global id).

    """
    # number of particles, taken from the size of the (last) given
    # property array; used to size the default 'gid' array.
    num_particles = 0

    prop_dict = {}
    for prop in props.keys():
        data = numpy.asarray(props[prop])
        num_particles = data.size

        if prop in ['pid', 'type', 'tag']:
            prop_dict[prop] = {'data':data,
                               'type':'int',
                               'name':prop}

        elif prop in ['gid']:
            prop_dict[prop] = {'data':data.astype(numpy.uint32),
                               'type': 'unsigned int',
                               'name':prop}
        else:
            prop_dict[prop] = {'data':data,
                               'type':'double',
                               'name':prop}

    default_props = ['x', 'y', 'z', 'h', 'rho', 'gid', 'tag', 'type', 'pid']

    # fill in any standard property the caller did not supply
    for prop in default_props:
        if prop not in prop_dict:
            if prop in ["type", "tag", "pid"]:
                prop_dict[prop] = {'name':prop, 'type':'int',
                                   'default':0}

            elif prop in ['gid']:
                data = numpy.ones(shape=num_particles, dtype=numpy.uint32)
                data[:] = UINT_MAX

                prop_dict[prop] = {'name':prop, 'type':'unsigned int',
                                   'data':data}

            else:
                prop_dict[prop] = {'name':prop, 'type':'double',
                                   'default':0}

    # create the particle array.  Bug fix: the original passed the
    # hard-coded literal name="" here, silently discarding the caller's
    # `name` argument.
    pa = ParticleArray(name=name, **prop_dict)

    return pa