This file is indexed.

/usr/share/pyshared/dap/proxy.py is in python-dap 2.2.6.7-2.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
from __future__ import division

"""Proxy class to DAP data.

This module implements a proxy object that behaves like an array and 
downloads data transparently from a DAP server when sliced. It is used
when building a representation of the dataset, and should not be
used directly.
"""

__author__ = "Roberto De Almeida <rob@pydap.org>"

import sys

from dap.xdr import DapUnpacker
from dap.helper import fix_slice
from dap.util.http import openurl

try:
    from numpy import ndarray as ndarray_
except ImportError:
    ndarray_ = None


class Proxy(object):
    """
    A proxy to data stored in a DODS server.

    Instances behave like arrays: slicing a proxy builds a DAP
    constraint-expression URL, downloads the corresponding bytes from
    the server and unpacks them with ``DapUnpacker``.
    """
    def __init__(self, url, id, shape, type, filters=None, cache=None, username=None, password=None):
        # Base dataset URL and the id of the variable inside the dataset.
        self.url = url
        self.id = id
        # Declared shape/type of the remote variable; ``shape`` is used
        # to normalize and clamp slices before building the URL.
        self.shape = shape
        self.type = type

        # Extra constraint-expression filters appended to every request.
        self.filters = filters or []

        # HTTP options forwarded verbatim to openurl().
        self.cache = cache
        self.username = username
        self.password = password

    def __iter__(self):
        # Iterating downloads the whole variable and iterates the result.
        return iter(self[:])

    def __getitem__(self, index):
        """
        Download data from the DAP server.

        When the proxy object is sliced, it builds a URL from the slice
        and retrieves the data from the DAP server.
        """
        # Build the base URL.
        url = '%s.dods?%s' % (self.url, self.id)

        if self.shape:
            # Fix the index for incomplete slices or ellipsis.
            index = fix_slice(len(self.shape), index)

            # Force index to tuple, to iterate over the slices.
            if not isinstance(index, tuple):
                index = (index,)

            # Iterate over the sliced dimensions, collecting the shape
            # of the requested hyperslab in ``outshape``.
            outshape = []
            for i, dimension in enumerate(index):
                # If dimension is a slice, get start, step and stop.
                if isinstance(dimension, slice):
                    start = dimension.start or 0
                    step = dimension.step or 1
                    # Compare against None explicitly: the old truth
                    # test (``if dimension.stop:``) wrongly treated a
                    # stop of 0 as "unspecified".
                    if dimension.stop is not None:
                        # DAP hyperslab bounds are inclusive, hence -1.
                        stop = dimension.stop - 1
                    else:
                        stop = self.shape[i] - 1

                # Otherwise, retrieve a single value.
                else:
                    start = dimension
                    stop = dimension
                    step = 1

                # When stop is not specified, use the shape.
                # (``sys.maxsize`` replaces the Py2-only ``sys.maxint``;
                # they are equal on CPython 2, so behavior is preserved.)
                if stop == sys.maxsize or stop > self.shape[i] - 1:
                    stop = self.shape[i] - 1
                # Negative slices.
                elif stop < 0:
                    stop = self.shape[i] + stop

                # Negative starting slices.
                if start < 0:
                    start = self.shape[i] + start

                # Build the URL used to retrieve the data.
                url = '%s[%s:%s:%s]' % (url, str(start), str(step), str(stop))

                # outshape is a list of the slice dimensions.
                outshape.append(1 + (stop - start) // step)

        else:
            # No need to resize the data.
            outshape = None

        # Make the outshape consistent with the numpy and pytables conventions.
        if outshape is not None:
            outshape = self._reduce_outshape(outshape)

        # Check for filters.
        if self.filters:
            ce = '&'.join(self.filters)
            url = '%s&%s' % (url, ce)

        # Fetch data.
        resp, data = openurl(url, self.cache, self.username, self.password)

        # First lines are ASCII information that end with 'Data:\n'.
        start = data.index('Data:\n') + len('Data:\n')
        xdrdata = data[start:]

        # Unpack data.
        output = DapUnpacker(xdrdata, self.shape, self.type, outshape).getvalue()

        # Convert length 1 arrays to scalars.
        if ndarray_:
            if outshape == () and isinstance(output, ndarray_):
                output = output[0]

        return output

    def _reduce_outshape(self, outshape):
        """Make the outshape consistent with the numpy and pytables conventions.

        (1, N) -> (N,)
        (N, 1) -> (N,)
        (N, M, 1) -> (N, M)

        Basically, all ones are removed from the shape.
        """
        return tuple(index for index in outshape if index != 1)

def _test():
    import doctest
    doctest.testmod()

# Run the doctest suite when this module is executed directly.
if __name__ == "__main__":
    _test()