/usr/lib/python2.7/dist-packages/tinycss/parsing.py is in python-tinycss 0.4-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

# coding: utf-8
"""
    tinycss.parsing
    ---------------

    Utilities for parsing lists of tokens.

    :copyright: (c) 2012 by Simon Sapin.
    :license: BSD, see LICENSE for more details.
"""

from __future__ import unicode_literals


# TODO: unit tests

def split_on_comma(tokens):
    """Split a list of tokens on commas, ie ``,`` DELIM tokens.

    Only "top-level" comma tokens are splitting points, not commas inside a
    function or other :class:`ContainerToken`.

    :param tokens:
        An iterable of :class:`~.token_data.Token` or
        :class:`~.token_data.ContainerToken`.
    :returns:
        A list of lists of tokens

    """
    parts = []
    this_part = []
    for token in tokens:
        if token.type == 'DELIM' and token.value == ',':
            parts.append(this_part)
            this_part = []
        else:
            this_part.append(token)
    parts.append(this_part)
    return parts
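
# Usage sketch (editor's note, not part of the upstream module), assuming the
# tinycss.tokenizer.tokenize_grouped() helper:
#
#     from tinycss.tokenizer import tokenize_grouped
#     parts = split_on_comma(tokenize_grouped('rgb(0, 0, 0), red'))
#     # -> two sub-lists: the commas inside the rgb() FUNCTION container are
#     #    not splitting points, only the top-level ',' DELIM token is.
#     #    The splitting commas themselves are dropped; whitespace is kept.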


def strip_whitespace(tokens):
    """Remove whitespace at the beggining and end of a token list.

    Whitespace tokens in-between other tokens in the list are preserved.

    :param tokens:
        A list of :class:`~.token_data.Token` or
        :class:`~.token_data.ContainerToken`.
    :return:
        A new sub-sequence of the list.

    """
    for i, token in enumerate(tokens):
        if token.type != 'S':
            break
    else:
        return []  # only whitespace
    tokens = tokens[i:]
    while tokens and tokens[-1].type == 'S':
        tokens.pop()
    return tokens
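
# Usage sketch (editor's note, not part of the upstream module), assuming the
# tinycss.tokenizer.tokenize_flat() helper:
#
#     from tinycss.tokenizer import tokenize_flat
#     stripped = strip_whitespace(list(tokenize_flat('  1px solid red ')))
#     # -> the leading and trailing S tokens are removed, while the S tokens
#     #    separating '1px', 'solid' and 'red' are preserved.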


def remove_whitespace(tokens):
    """Remove any top-level whitespace in a token list.

    Whitespace tokens inside recursive :class:`~.token_data.ContainerToken`
    are preserved.

    :param tokens:
        A list of :class:`~.token_data.Token` or
        :class:`~.token_data.ContainerToken`.
    :return:
        A new sub-sequence of the list.

    """
    return [token for token in tokens if token.type != 'S']
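
# Usage sketch (editor's note, not part of the upstream module), with
# tokenize_grouped as in the sketch above:
#
#     remove_whitespace(tokenize_grouped('1px solid rgb(0, 0, 0)'))
#     # -> only the DIMENSION, IDENT and FUNCTION tokens remain; the S tokens
#     #    nested inside the rgb() ContainerToken are left untouched.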


def validate_value(tokens):
    """Validate a property value.

    :param tokens:
        an iterable of tokens
    :raises:
        :class:`ParseError` if there is any invalid token for the 'value'
        production of the core grammar.

    """
    for token in tokens:
        type_ = token.type
        if type_ == '{':
            validate_block(token.content, 'property value')
        else:
            validate_any(token, 'property value')
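
# Usage sketch (editor's note, not part of the upstream module), with
# tokenize_grouped as in the sketches above:
#
#     validate_value(tokenize_grouped('1px solid red'))   # passes silently
#     validate_value(tokenize_grouped('red;'))
#     # -> raises ParseError: unexpected ; token in property value, since
#     #    ';' is not allowed by the 'value' production of the core grammar.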


def validate_block(tokens, context):
    """
    :raises:
        :class:`ParseError` if there is any invalid token for the 'block'
        production of the core grammar.
    :param tokens: an iterable of tokens
    :param context: a string for the 'unexpected in ...' message

    """
    for token in tokens:
        type_ = token.type
        if type_ == '{':
            validate_block(token.content, context)
        elif type_ not in (';', 'ATKEYWORD'):
            validate_any(token, context)


def validate_any(token, context):
    """
    :raises:
        :class:`ParseError` if this is an invalid token for the
        'any' production of the core grammar.
    :param token: a single token
    :param context: a string for the 'unexpected in ...' message

    """
    type_ = token.type
    if type_ in ('FUNCTION', '(', '['):
        for token in token.content:
            validate_any(token, type_)
    elif type_ not in ('S', 'IDENT', 'DIMENSION', 'PERCENTAGE', 'NUMBER',
                       'INTEGER', 'URI', 'DELIM', 'STRING', 'HASH', ':',
                       'UNICODE-RANGE'):
        if type_ in ('}', ')', ']'):
            adjective = 'unmatched'
        else:
            adjective = 'unexpected'
        raise ParseError(
            token, '{0} {1} token in {2}'.format(adjective, type_, context))
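
# The 'unmatched' wording shows up for stray closing brackets, e.g. (editor's
# note, not part of the upstream module, with tokenize_flat as above):
#
#     validate_value(tokenize_flat('red )'))
#     # -> raises ParseError: unmatched ) token in property value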


class ParseError(ValueError):
    """Details about a CSS syntax error. Usually indicates that something
    (a rule or a declaration) was ignored and will not appear as a parsed
    object.

    This exception is typically logged in a list rather than being propagated
    to the user API.

    .. attribute:: line

        Source line where the error occurred.

    .. attribute:: column

        Column in the source line where the error occurred.

    .. attribute:: reason

        What happened (a string).

    """
    def __init__(self, subject, reason):
        self.line = subject.line
        self.column = subject.column
        self.reason = reason
        super(ParseError, self).__init__(
            'Parse error at {0.line}:{0.column}, {0.reason}'.format(self))
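
# Usage sketch (editor's note, not part of the upstream module): the position
# attributes come from the offending token, e.g. with tokenize_grouped as above:
#
#     try:
#         validate_value(tokenize_grouped('red;'))
#     except ParseError as error:
#         error.line, error.column, error.reason
#         # -> (1, 4, 'unexpected ; token in property value'); str(error) adds
#         #    the position: 'Parse error at 1:4, unexpected ; token in property value'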