This file is indexed.

/usr/lib/python3/dist-packages/openpyxl/formula/tokenizer.py is in python3-openpyxl 2.3.0-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

"""
This module contains a tokenizer for Excel formulae.

The tokenizer is based on the JavaScript tokenizer found at
http://ewbi.blogs.com/develops/2004/12/excel_formula_p.html, written by
Eric Bachtal.
"""

import re


class TokenizerError(Exception):
    "Base class for all Tokenizer errors."


class Tokenizer(object):

    """
    A tokenizer for Excel worksheet formulae.

    Converts a unicode string representing an Excel formula (in A1 notation)
    into a sequence of `Token` objects.

    `formula`: The unicode string to tokenize

    Tokenizer defines a method `.parse()` to parse the formula into tokens,
    which can then be accessed through the `.items` attribute.
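
    Example (an illustrative sketch; in this version `.parse()` must be
    called explicitly):

        tok = Tokenizer('=SUM(A1, 1)')
        tok.parse()
        [t.value for t in tok.items]  # ['SUM(', 'A1', ',', ' ', '1', ')']
        tok.render()                  # '=SUM(A1, 1)'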

    """

    SN_RE = re.compile("^[1-9](\\.[0-9]+)?E$")  # Scientific notation
    WSPACE_RE = re.compile(" +")
    STRING_REGEXES = {
        # Inside a string, all characters are treated as literals, except for
        # the quote character used to start the string. That character, when
        # doubled, is treated as a single character in the string. If an
        # unmatched quote appears, the string is terminated.
        '"': re.compile('"(?:[^"]*"")*[^"]*"(?!")'),
        "'": re.compile("'(?:[^']*'')*[^']*'(?!')"),
    }
    ERROR_CODES = ("#NULL!", "#DIV/0!", "#VALUE!", "#REF!", "#NAME?",
                   "#NUM!", "#N/A")
    TOKEN_ENDERS = ',;}) +-*/^&=><%'  # Each of these characters marks the
                                       # end of an operand token

    def __init__(self, formula):
        self.formula = formula
        self.items = []
        self.token_stack = []  # Used to keep track of arrays, functions, and
                               # parentheses
        self.offset = 0  # How many chars have we read
        self.token = []  # Used to build up token values char by char

    def parse(self):
        "Populate self.items with the tokens from the formula."
        if not self.formula:
            return
        elif self.formula[0] == '=':
            self.offset += 1
        else:
            self.items.append(Token(self.formula, Token.LITERAL))
            return
        consumers = (
            ('"\'', self.parse_string),
            ('[', self.parse_brackets),
            ('#', self.parse_error),
            (' ', self.parse_whitespace),
            ('+-*/^&=><%', self.parse_operator),
            ('{(', self.parse_opener),
            (')}', self.parse_closer),
            (';,', self.parse_separator),
        )
        dispatcher = {}  # maps chars to the specific parsing function
        for chars, consumer in consumers:
            dispatcher.update(dict.fromkeys(chars, consumer))
        while self.offset < len(self.formula):
            if self.check_scientific_notation():  # May consume one character
                continue
            curr_char = self.formula[self.offset]
            if curr_char in self.TOKEN_ENDERS:
                self.save_token()
            if curr_char in dispatcher:
                self.offset += dispatcher[curr_char]()
            else:
                # TODO: this can probably be sped up using a regex to get to
                # the next interesting character
                self.token.append(curr_char)
                self.offset += 1
        self.save_token()

    def parse_string(self):
        """
        Parse a "-delimited string or '-delimited link.

        The offset must be pointing to either a single quote ("'") or double
        quote ('"') character. Strings are parsed according to Excel's
        rules, where the delimiter is escaped by doubling it. E.g.,
        "abc""def" in Excel is parsed as 'abc"def' in Python.

        Returns the number of characters matched. (Does not update
        self.offset)

        """
        self.assert_empty_token()
        delim = self.formula[self.offset]
        assert delim in ('"', "'")
        regex = self.STRING_REGEXES[delim]
        match = regex.match(self.formula[self.offset:])
        if match is None:
            subtype = "string" if delim == '"' else 'link'
            raise TokenizerError(
                "Reached end of formula while parsing %s in %s" %
                (subtype, self.formula))
        match = match.group(0)
        if delim == '"':
            self.items.append(Token.make_operand(match))
        else:
            self.token.append(match)
        return len(match)

    def parse_brackets(self):
        """
        Consume all the text between square brackets [].

        Returns the number of characters matched. (Does not update
        self.offset)
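
        Example (illustrative): in '=SUM(Table1[Column1])' the text
        '[Column1]' is consumed here and kept as part of the current
        operand token, which ends up as 'Table1[Column1]'.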

        """
        assert self.formula[self.offset] == '['
        right = self.formula.find(']', self.offset) + 1
        if right == 0:
            raise TokenizerError(
                "Encountered unmatched '[' in %s" % self.formula)
        self.token.append(self.formula[self.offset: right])
        return right - self.offset

    def parse_error(self):
        """
        Consume the text following a '#' as an error.

        Looks for a match in self.ERROR_CODES and returns the number of
        characters matched. (Does not update self.offset)

        """
        self.assert_empty_token()
        assert self.formula[self.offset] == '#'
        subformula = self.formula[self.offset:]
        for err in self.ERROR_CODES:
            if subformula.startswith(err):
                self.items.append(Token.make_operand(err))
                return len(err)
        raise TokenizerError(
            "Invalid error code at position %d in '%s'" %
            (self.offset, self.formula))

    def parse_whitespace(self):
        """
        Consume a string of consecutive spaces.

        Returns the number of spaces found. (Does not update self.offset).

        """
        assert self.formula[self.offset] == ' '
        self.items.append(Token(' ', Token.WSPACE))
        return self.WSPACE_RE.match(self.formula[self.offset:]).end()

    def parse_operator(self):
        """
        Consume the characters constituting an operator.

        Returns the number of characters consumed. (Does not update
        self.offset)
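
        Example (illustrative): in '=-A1+3' the leading '-' is tokenized as
        a prefix operator (OP_PRE), while the '+' that follows the A1
        operand is tokenized as an infix operator (OP_IN).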

        """
        if self.formula[self.offset:self.offset + 2] in ('>=', '<=', '<>'):
            self.items.append(Token(
                self.formula[self.offset:self.offset + 2],
                Token.OP_IN
            ))
            return 2
        curr_char = self.formula[self.offset]  # guaranteed to be 1 char
        assert curr_char in '%*/^&=><+-'
        if curr_char == '%':
            token = Token('%', Token.OP_POST)
        elif curr_char in "*/^&=><":
            token = Token(curr_char, Token.OP_IN)
        # From here on, curr_char is guaranteed to be in '+-'
        elif not self.items:
            token = Token(curr_char, Token.OP_PRE)
        else:
            prev = self.items[-1]
            is_infix = (
                prev.subtype == Token.CLOSE
                or prev.type == Token.OP_POST
                or prev.type == Token.OPERAND
            )
            if is_infix:
                token = Token(curr_char, Token.OP_IN)
            else:
                token = Token(curr_char, Token.OP_PRE)
        self.items.append(token)
        return 1

    def parse_opener(self):
        """
        Consumes a ( or { character.

        Returns the number of characters consumed. (Does not update
        self.offset)

        """
        assert self.formula[self.offset] in ('(', '{')
        if self.formula[self.offset] == '{':
            self.assert_empty_token()
            token = Token.make_subexp("{")
        elif self.token:
            token_value = "".join(self.token) + '('
            del self.token[:]
            token = Token.make_subexp(token_value)
        else:
            token = Token.make_subexp("(")
        self.items.append(token)
        self.token_stack.append(token)
        return 1

    def parse_closer(self):
        """
        Consumes a } or ) character.

        Returns the number of characters consumed. (Does not update
        self.offset)

        """
        assert self.formula[self.offset] in (')', '}')
        token = self.token_stack.pop().get_closer()
        if token.value != self.formula[self.offset]:
            raise TokenizerError(
                "Mismatched ( and { pair in '%s'" % self.formula)
        self.items.append(token)
        return 1

    def parse_separator(self):
        """
        Consumes a ; or , character.

        Returns the number of characters consumed. (Does not update
        self.offset)
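
        Example (illustrative): in '=SUM(A1,B1)' the ',' sits inside a FUNC
        subexpression and becomes an argument separator (SEP/ARG), whereas
        in '=(A1,B1)' it sits inside a PAREN subexpression and becomes the
        range-union operator (OP_IN).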

        """
        curr_char = self.formula[self.offset]
        assert curr_char in (';', ',')
        if curr_char == ';':
            token = Token.make_separator(";")
        else:
            try:
                top_type = self.token_stack[-1].type
            except IndexError:
                token = Token(",", Token.OP_IN)  # Range Union operator
            else:
                if top_type == Token.PAREN:
                    token = Token(",", Token.OP_IN)  # Range Union operator
                else:
                    token = Token.make_separator(",")
        self.items.append(token)
        return 1

    def check_scientific_notation(self):
        """
        Consumes a + or - character if part of a number in sci. notation.

        Returns True if the character was consumed and self.offset was
        updated, False otherwise.
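
        Example (illustrative): while tokenizing '=1.5E+10', the partial
        token '1.5E' matches SN_RE, so the '+' is appended to the number
        being built instead of starting an operator token.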

        """
        curr_char = self.formula[self.offset]
        if (curr_char in '+-'
                and len(self.token) >= 1
                and self.SN_RE.match("".join(self.token))):
            self.token.append(curr_char)
            self.offset += 1
            return True
        return False

    def assert_empty_token(self):
        """
        Ensure that there's no token currently being parsed.

        If there are unconsumed token contents, it means we hit an unexpected
        token transition. In this case, we raise a TokenizerError.

        """
        if self.token:
            raise TokenizerError(
                "Unexpected character at position %d in '%s'" %
                (self.offset, self.formula))

    def save_token(self):
        """If there's a token being parsed, add it to the item list."""
        if self.token:
            self.items.append(Token.make_operand("".join(self.token)))
            del self.token[:]

    def render(self):
        "Convert the parsed tokens back to a string."
        if not self.items:
            return ""
        elif self.items[0].type == Token.LITERAL:
            return self.items[0].value
        return "=" + "".join(token.value for token in self.items)


class Token(object):

    """
    A token in an Excel formula.

    Tokens have three attributes:

    * `value`: The string that was parsed to produce this token
    * `type`: A string identifying the type of token
    * `subtype`: A string identifying the subtype of the token (optional;
                 defaults to "")

    """

    __slots__ = ['value', 'type', 'subtype']

    LITERAL = "LITERAL"
    OPERAND = "OPERAND"
    FUNC = "FUNC"
    ARRAY = "ARRAY"
    PAREN = "PAREN"
    SEP = "SEP"
    OP_PRE = "OPERATOR-PREFIX"
    OP_IN = "OPERATOR-INFIX"
    OP_POST = "OPERATOR-POSTFIX"
    WSPACE = "WHITE-SPACE"

    def __init__(self, value, type_, subtype=""):
        self.value = value
        self.type = type_
        self.subtype = subtype

    # Literal operands:
    #
    # Literal operands are always of type 'OPERAND' and can be of subtype
    # 'TEXT' (for text strings), 'NUMBER' (for all numeric types), 'LOGICAL'
    # (for TRUE and FALSE), 'ERROR' (for literal error values), or 'RANGE'
    # (for all range references).

    TEXT = 'TEXT'
    NUMBER = 'NUMBER'
    LOGICAL = 'LOGICAL'
    ERROR = 'ERROR'
    RANGE = 'RANGE'

    @classmethod
    def make_operand(cls, value):
        "Create an operand token."
        if value.startswith('"'):
            subtype = cls.TEXT
        elif value.startswith('#'):
            subtype = cls.ERROR
        elif value in ('TRUE', 'FALSE'):
            subtype = cls.LOGICAL
        else:
            try:
                float(value)
                subtype = cls.NUMBER
            except ValueError:
                subtype = cls.RANGE
        return cls(value, cls.OPERAND, subtype)
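
    # Illustrative examples of make_operand's subtype classification:
    #   make_operand('"text"')  -> TEXT
    #   make_operand('#REF!')   -> ERROR
    #   make_operand('TRUE')    -> LOGICAL
    #   make_operand('42')      -> NUMBER
    #   make_operand('A1:B2')   -> RANGE (the fallback for references/names)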


    # Subexpressions
    #
    # There are three types of subexpressions: functions, array literals, and
    # parentheticals. Subexpressions have 'OPEN' and 'CLOSE' tokens. 'OPEN'
    # is used when parsing the initial expression token (i.e., '(' or '{')
    # and 'CLOSE' is used when parsing the closing expression token ('}' or
    # ')').
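    #
    # Illustrative examples: make_subexp('SUM(') -> FUNC/OPEN,
    # make_subexp('{') -> ARRAY/OPEN, make_subexp(')', func=True) ->
    # FUNC/CLOSE, and make_subexp(')') -> PAREN/CLOSE.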

    OPEN = "OPEN"
    CLOSE = "CLOSE"

    @classmethod
    def make_subexp(cls, value, func=False):
        """
        Create a subexpression token.

        `value`: The value of the token
        `func`: If True, force the token to be of type FUNC

        """
        assert value[-1] in ('{', '}', '(', ')')
        if func:
            assert re.match('.+\\(|\\)', value)
            type_ = Token.FUNC
        elif value in '{}':
            type_ = Token.ARRAY
        elif value in '()':
            type_ = Token.PAREN
        else:
            type_ = Token.FUNC
        subtype = cls.CLOSE if value in ')}' else cls.OPEN
        return cls(value, type_, subtype)

    def get_closer(self):
        "Return a closing token that matches this token's type."
        assert self.type in (self.FUNC, self.ARRAY, self.PAREN)
        assert self.subtype == self.OPEN
        value = "}" if self.type == self.ARRAY else ")"
        return self.make_subexp(value, func=self.type == self.FUNC)

    # Separator tokens
    #
    # Argument separators always have type 'SEP' and can have one of two
    # subtypes: 'ARG', 'ROW'. 'ARG' is used for the ',' token, when used to
    # delimit either function arguments or array elements. 'ROW' is used for
    # the ';' token, which is always used to delimit rows in an array
    # literal.

    ARG = "ARG"
    ROW = "ROW"

    @classmethod
    def make_separator(cls, value):
        "Create a separator token"
        assert value in (',', ';')
        subtype = cls.ARG if value == ',' else cls.ROW
        return cls(value, cls.SEP, subtype)