This file is indexed.

/usr/lib/python3/dist-packages/kajiki/text.py is in python3-kajiki 0.5.3-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
# -*- coding: utf-8 -*-

'''Text template compiler.

Notable in this module are:

* TextTemplate - function building a template from text string or filename.
* _pattern - the regex used to find the beginnings of tags and expressions.
* _Scanner - scans text and generates a stream of tokens.
* _Parser - parses a stream of tokens into the internal representation (IR)
  tree.
* _Parser._parse_<tagname> - consumes the body of a tag and returns an ir.Node.
'''

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
import codecs
import re
from .ddict import defaultdict
from itertools import chain
from nine import iteritems, str

from shlex import split as shlex_split  # Prior to Python 2.7.3, the
from sys import version_info            # *shlex* module did not support
if version_info < (2, 7, 3):            # Unicode input. Work around:
    _shlex_split = shlex_split
    shlex_split = lambda txt: _shlex_split(txt.encode('utf-8'))
del version_info

import kajiki
from . import ir

# Master lexing regex.  Each alternative sets exactly one named group;
# _Scanner.__iter__ dispatches on which group matched.  The '..._invalid'
# groups are deliberately empty so that malformed constructs still yield
# a match the scanner can report or skip.  The bodies of ${...} and
# {%...%} tags are NOT matched here; the scanner consumes them itself
# after seeing the opening marker.
_pattern = r'''
\$(?:
    (?P<expr_escaped>\$) |      # Escape $$
    (?P<expr_named>[_a-z][_a-z0-9.]*) | # $foo.bar
    {(?P<expr_braced>) | # ${....
    (?P<expr_invalid>)
) |
^\s*%(?:
    (?P<tag_bare>[a-z]+) | # %for, %end, etc.
    (?P<tag_bare_invalid>)
)|
^\s*{%-(?P<tag_begin_ljust>[a-z]+)|  # {%-for, {%-end, etc.
{%(?:
    (?P<tag_begin>[a-z]+) | # {%for, {%end, etc.
    (?P<tag_begin_invalid>)
)
'''
# MULTILINE so '^' anchors '%'-tags to line starts; IGNORECASE admits
# upper-case tag/expression names; VERBOSE enables the inline comments.
_re_pattern = re.compile(_pattern, re.VERBOSE | re.IGNORECASE | re.MULTILINE)


def TextTemplate(source=None, filename=None, autoescape=False,
                 encoding='utf-8'):
    """Compile a text template and return a kajiki template class.

    :param source: template text (unicode).  If None, *filename* is read
        and decoded with *encoding* instead.
    :param filename: path to a template file; also used for error
        reporting (defaults to '<string>' when compiling from *source*).
    :param autoescape: if True, expressions are escaped by default.
    :param encoding: codec used to read *filename* (default 'utf-8').
    :raises ValueError: if neither *source* nor *filename* is given.
    :raises TypeError: if *source* is not a unicode string.
    """
    # Raise real exceptions instead of assert: asserts are stripped under
    # ``python -O`` and these are user-input checks.  Testing ``is None``
    # (rather than truthiness) also accepts an explicit empty template
    # string, which the old ``assert source or filename`` rejected.
    if source is None and filename is None:
        raise ValueError(
            "You must either provide a *source* argument "
            "or a *filename* argument to TextTemplate().")
    if source is None:
        with codecs.open(filename, encoding=encoding) as f:
            source = f.read()
    if filename is None:
        filename = '<string>'
    if not isinstance(source, str):
        raise TypeError(
            "*source* must be a unicode string, not a {}".format(type(source)))
    scanner = _Scanner(filename, source)
    tree = _Parser(scanner, autoescape).parse()
    tree.filename = filename
    return kajiki.template.from_ir(tree)


class _Scanner(object):
    '''Lexer: iterates over template *source* yielding _Text, _Expr and
    _Tag tokens.

    Maintains a monotonically increasing character offset (``pos``) and a
    running line number so tokens carry (filename, lineno) for error
    reporting.
    '''

    def __init__(self, filename, source):
        self.filename = filename
        self.source = source
        self.lineno = 1
        self.pos = 0

    def __iter__(self):
        '''Yield tokens in source order.

        Dispatches on which named group of _re_pattern matched; any
        literal text lying between ``self.pos`` and the match start is
        emitted first as a _Text token.
        '''
        source = self.source
        for mo in _re_pattern.finditer(source):
            start = mo.start()
            if start > self.pos:
                # Literal text preceding this construct.
                yield self.text(source[self.pos:start])
                self.pos = start
            groups = mo.groupdict()
            if groups['expr_braced'] is not None:
                self.pos = mo.end()
                yield self._get_braced_expr()
            elif groups['expr_named'] is not None:
                self.pos = mo.end()
                yield self.expr(groups['expr_named'])
            elif groups['expr_escaped'] is not None:
                # '$$' collapses to a single literal '$'.
                self.pos = mo.end()
                yield self.text('$')
            elif groups['tag_bare'] is not None:
                self.pos = mo.end()
                yield self._get_tag_bare(groups['tag_bare'])
            elif groups['tag_begin'] is not None:
                self.pos = mo.end()
                yield self._get_tag(groups['tag_begin'])
            elif groups['tag_begin_ljust'] is not None:
                self.pos = mo.end()
                yield self._get_tag(groups['tag_begin_ljust'])
            elif groups['tag_bare_invalid'] is not None:
                # '%' not followed by a tag name: leave pos alone so the
                # characters are picked up as literal text later on.
                continue
            else:
                # Unrecognized construct (invalid '$' or '{%' usage):
                # dump the numbered source for debugging and abort.
                msg = 'Syntax error %s:%s' % (self.filename, self.lineno)
                for i, line in enumerate(self.source.splitlines()):
                    print('%3d %s' % (i + 1, line))
                print(msg)
                assert False, groups
        if self.pos != len(source):
            # Trailing literal text after the final match.
            yield self.text(source[self.pos:])

    def _get_pos(self):
        return self._pos

    def _set_pos(self, value):
        # The scanner only ever moves forward; a backwards move would
        # indicate a lexing bug.
        assert value >= getattr(self, '_pos', 0)
        self._pos = value
    pos = property(_get_pos, _set_pos)

    def text(self, text):
        '''Build a _Text token, advancing lineno by the newlines spanned.'''
        self.lineno += text.count('\n')
        return _Text(self.filename, self.lineno, text)

    def expr(self, text):
        '''Build an _Expr token, advancing lineno by the newlines spanned.'''
        self.lineno += text.count('\n')
        return _Expr(self.filename, self.lineno, text)

    def tag(self, tagname, body):
        '''Build a _Tag token, advancing lineno by the newlines spanned.'''
        tag = _Tag(self.filename, self.lineno, tagname, body)
        self.lineno += tag.text.count('\n')
        return tag

    def _get_tag_bare(self, tagname):
        '''Consume a '%tag ...' directive; the body runs to end of line.'''
        end = self.source.find('\n', self.pos)
        if end == -1:
            end = len(self.source)
        body = self.source[self.pos:end]
        self.lineno += 1
        self.pos = end + 1  # skip past the terminating newline as well
        return self.tag(tagname, body)

    def _get_tag(self, tagname):
        '''Consume a '{%tag ... %}' body up to the closing '%}'.'''
        end = self.source.find('%}', self.pos)
        assert end > 0
        body = self.source[self.pos:end]
        self.pos = end + 2
        if body.endswith('-'):
            # '-%}' variant: strip spaces/tabs following the tag.
            body = body[:-1]
            while self.source[self.pos] in ' \t':
                self.pos += 1
        return self.tag(tagname, body)

    def _get_braced_expr(self):
        '''Consume a '${...}' expression.

        Relies on compile() raising SyntaxError at the closing '}' to
        locate the end of the embedded Python expression.

        NOTE(review): if the remainder of the source happens to compile
        cleanly, this falls through and returns None -- presumably
        unreachable for well-formed templates, but worth confirming.
        '''
        try:
            compile(self.source[self.pos:], '', 'eval')
        except SyntaxError as se:
            end = se.offset + self.pos
            text = self.source[self.pos:end - 1]
            self.pos = end
            return self.expr(text)


class _Parser(object):
    '''Parses the scanner's token stream into a kajiki IR tree.

    One ``_parse_<tagname>`` method exists per supported tag;
    _parse_body drives the dispatch.  _parse_body terminates its stream
    with either the stop-tag token or None (end of input), which is why
    every caller slices the collected list with ``body[:-1]``.
    '''

    def __init__(self, tokenizer, autoescape=False):
        self.tokenizer = tokenizer
        self.functions = defaultdict(list)
        self.functions['__main__()'] = []
        self.mod_py = []  # module-level python blocks
        self.iterator = iter(self.tokenizer)
        self.autoescape = autoescape
        self._in_def = False    # True while parsing the body of a %def
        self._is_child = False  # True once an %extends tag has been seen

    def parse(self):
        '''Parse the whole template; return an ir.TemplateNode.'''
        body = list(self._parse_body())
        self.functions['__main__()'] = body[:-1]  # drop the None sentinel
        defs = [ir.DefNode(k, *v) for k, v in iteritems(self.functions)]
        return ir.TemplateNode(self.mod_py, defs)

    def text(self, token):
        '''Convert a _Text token into an ir.TextNode (backslash escapes
        are resolved here).'''
        text = ''.join(_unescape_newlines(token.text))
        node = ir.TextNode(text)
        node.filename = token.filename
        node.lineno = token.lineno
        return node

    def expr(self, token):
        '''Convert an _Expr token into an ir.ExprNode.'''
        node = ir.ExprNode(token.text, safe=not self.autoescape)
        node.filename = token.filename
        node.lineno = token.lineno
        return node

    def push_tok(self, token):
        '''Push *token* back onto the front of the token stream.'''
        self.iterator = chain([token], self.iterator)

    def _parse_body(self, *stoptags):
        '''Yield child nodes until a tag in *stoptags* or end of input.

        The terminating stop-tag token (or None at end of input) is
        yielded as the final item, so callers inspect/strip body[-1].
        '''
        while True:
            try:
                token = next(self.iterator)
                if isinstance(token, _Text):
                    yield self.text(token)
                elif isinstance(token, _Expr):
                    yield self.expr(token)
                elif isinstance(token, _Tag):
                    if token.tagname in stoptags:
                        yield token
                        break
                    # Dispatch to the matching _parse_<tagname> method.
                    parser = getattr(self, '_parse_%s' % token.tagname)
                    yield parser(token)
                else:
                    msg = 'Parse error: %r unexpected' % token
                    assert False, msg
            except StopIteration:
                yield None
                break

    def _parse_def(self, token):
        '''%def: named template function; nested defs become InnerDefNodes,
        top-level defs are registered in self.functions instead of being
        emitted inline.'''
        old_in_def, self._in_def = self._in_def, True
        body = list(self._parse_body('end'))
        self._in_def = old_in_def
        if self._in_def:
            return ir.InnerDefNode(token.body, *body[:-1])
        else:
            self.functions[token.body.strip()] = body[:-1]
            return None

    def _parse_call(self, token):
        '''%call: invoke a def, passing the enclosed body as $caller.'''
        b = token.body.find('(')
        e = token.body.find(')', b)
        assert e > b > -1
        arglist = token.body[b:e + 1]      # '(args...)' for the caller def
        call = token.body[e + 1:].strip()  # the expression actually invoked
        body = list(self._parse_body('end'))
        return ir.CallNode(
            '$caller%s' % arglist,
            call.replace('%caller', '$caller'),
            *body[:-1])

    def _parse_if(self, token):
        '''%if: conditional; a terminating %else is pushed back so it is
        parsed as the tag following this node.'''
        body = list(self._parse_body('end', 'else'))
        stoptok = body[-1]
        if stoptok.tagname == 'else':
            self.push_tok(stoptok)
        return ir.IfNode(token.body, *body[:-1])

    def _parse_for(self, token):
        '''%for: loop over the body up to %end.'''
        body = list(self._parse_body('end'))
        return ir.ForNode(token.body, *body[:-1])

    def _parse_switch(self, token):
        '''%switch: container for %case/%else alternatives.'''
        body = list(self._parse_body('end'))
        return ir.SwitchNode(token.body, *body[:-1])

    def _parse_case(self, token):
        '''%case: one switch alternative; the stop token is always pushed
        back so the following %case/%else or the enclosing %switch's
        %end can consume it.'''
        body = list(self._parse_body('case', 'else', 'end'))
        stoptok = body[-1]
        self.push_tok(stoptok)
        return ir.CaseNode(token.body, *body[:-1])

    def _parse_else(self, token):
        '''%else: alternative branch of an %if or %switch.'''
        body = list(self._parse_body('end'))
        return ir.ElseNode(*body[:-1])

    def _parse_extends(self, token):
        '''%extends: inherit from a parent template; marks this template
        as a child, which changes how %block is compiled.'''
        parts = shlex_split(token.body)
        fn = parts[0]
        assert len(parts) == 1
        self._is_child = True
        return ir.ExtendNode(fn)

    def _parse_import(self, token):
        '''%import: make another template available, optionally 'as name'.'''
        parts = shlex_split(token.body)
        fn = parts[0]
        if len(parts) > 1:
            assert parts[1] == 'as'
            return ir.ImportNode(fn, parts[2])
        else:
            return ir.ImportNode(fn)

    def _parse_include(self, token):
        '''%include: inline another template file.'''
        parts = shlex_split(token.body)
        fn = parts[0]
        assert len(parts) == 1
        return ir.IncludeNode(fn)

    def _parse_py(self, token):
        '''%py: python code, inline ('%py x = 1') or block form (to %end).

        Nodes flagged module_level by ir.PythonNode are collected in
        self.mod_py instead of appearing in the output tree.
        '''
        body = token.body.strip()
        if body:
            # Inline form: fake the [node, sentinel] shape _parse_body yields.
            body = [ir.TextNode(body), None]
        else:
            body = list(self._parse_body('end'))
        node = ir.PythonNode(*body[:-1])
        if node.module_level:
            self.mod_py.append(node)
            return None
        else:
            return node

    def _parse_block(self, token):
        '''%block: overridable named region compiled to _kj_block_<name>().

        In a child template the block only overrides the parent's version
        (prepending a parent_block=parent._kj_block_<name> binding); in a
        base template an expression node invoking the block function is
        emitted in place.
        '''
        fname = '_kj_block_' + token.body.strip()
        decl = fname + '()'
        body = list(self._parse_body('end'))[:-1]
        self.functions[decl] = body
        if self._is_child:
            parent_block = 'parent.' + fname
            body.insert(0,
                ir.PythonNode(ir.TextNode('parent_block=%s' % parent_block)))
            return None
        else:
            return ir.ExprNode(decl)


class _Token(object):
    def __init__(self, filename, lineno, text):
        self.filename = filename
        self.lineno = lineno
        self.text = text

    def __repr__(self):  # pragma no cover
        return '<%s %r>' % (
            self.__class__.__name__,
            self.text)


class _Expr(_Token):
    '''Token for a substitution expression ($name or ${...}).'''


class _Text(_Token):
    '''Token for a run of literal template text.'''


class _Tag(_Token):
    '''Token for a template directive such as %for or {%def ...%}.

    Keeps the tag name and body separately; the base-class text is the
    two joined by a single space.
    '''

    def __init__(self, filename, lineno, tagname, body):
        self.tagname = tagname
        self.body = body
        combined = '%s %s' % (tagname, body)
        super(_Tag, self).__init__(filename, lineno, combined)


def _unescape_newlines(text):
    i = 0
    while i < len(text):
        if text[i] == '\\':
            if text[i + 1] != '\n':
                yield text[i + 1]
            i += 2
        else:
            yield text[i]
            i += 1