
/usr/lib/python2.7/dist-packages/mx/Misc/CSV.py is in python-egenix-mxtools 3.2.7-1build1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

""" CSV (Comma Separated Values) table format reader/writer.

    The two classes read and write CSV data. It's a fairly simple
    format for data exchange with nearly all spreadsheets, databases,
    organizers, etc.

    The reader class is built to be easy on the data passed in: small
    errors like missing commas don't cause an exception (but do set a
    variable to indicate this). Data is converted from CSV text into
    an internal format which can then be extracted into different
    forms of table representation.

    The writer writes standard CSV files and knows about quoting
    rules, separator handling etc. so that interfacing to spreadsheets
    and databases should pose no problem.

    Both classes can be subclassed to enhance/modify their behaviour.

    Copyright (c) 2000-2005, Marc-Andre Lemburg; mailto:mal@lemburg.com
    Copyright (c) 2000-2013, eGenix.com Software GmbH; mailto:info@egenix.com
    See the documentation for further information on copyrights,
    or contact the author. All Rights Reserved.
"""
__version__ = '1.0'
_debug = 0

import re,sys,types,exceptions
from mx import TextTools, Tools

### Errors

class Error(exceptions.StandardError):
    pass

### Reader base class

class Reader:

    # Did decoding have errors ?
    errors = 0

    # If there were errors, then this list contains the line number of
    # the lines with errors
    errorlines = None
    
    # List of lines, each being a list of strings
    lines = None

    # Width of the received data (max. number of entries per line)
    width = 0
    
    # List of column names found in the data's first row; possibly
    # filtered through .filter_header()
    columns = None

    # String of separator characters which are used to separate items
    # on an input line
    separators = ',;\t'

    # My version, which runs in exponential time for '"abcdefgh...'
    # parseitem = re.compile(r'"([^"]+|"")*"|[^",;\015\012]*')

    # Thanks to Andrew Kuchling for helping me with this "simple" RE
    parseitem = re.compile(r'"([^"]|"")*"|[^"' + separators + r'\015\012]*')
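
    # Illustrative note: on the input '"a""b",c' this RE first matches the
    # quoted item '"a""b"' and then, after the separating comma, the bare
    # item 'c'.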

    def __init__(self, separators=None):

        if separators is not None:
            self.parseitem = re.compile(r'"([^"]|"")*"|[^"'
                                        + separators + r'\015\012]*')

    def flush(self):

        """ Empty the object and reset errors
        """
        self.lines = None
        self.width = 0
        self.errors = 0
        self.columns = None

    def load(self, file, header=1, columns=None,

             StringType=types.StringType):

        """ Read a file.

            If header is true (default), the first line of input is
            interpreted as list of column names.

            If columns is given as a list of strings, these column
            names are used.

            If both header and columns are used, columns overrides the
            columns set by reading the header line. This is useful to
            override header information from the input data.

        """
        if type(file) == StringType:
            file = open(file,'rb')
        text = file.read()
        file.close()
        self.lines = self._decode(text)
        if header:
            self.process_header()
        if columns is not None:
            self.set_columns(columns)

    def loads(self, text, header=1, columns=None):

        """ Read the given text

            If header is true (default), the first line of input is
            interpreted as list of column names.

            If columns is given as a list of strings, these column
            names are used.

            If both header and columns are used, columns overrides the
            columns set by reading the header line. This is useful to
            override header information from the input data.

        """
        self.lines = self._decode(text)
        if header:
            self.process_header()
        if columns is not None:
            self.set_columns(columns)

    def list(self):

        """ Return the current data as list of lists, each having
            self.width string entries.

            Missing entries are set to None.

        """
        width = self.width
        lines = self.lines
        table = [None] * len(lines)
        for i, row in Tools.irange(lines):
            row = row[:]
            if len(row) < width:
                row[len(row):] = [None]*(width-len(row))
            table[i] = row
        return table

    def dictionary(self):

        """ Return the current data as dictionary of lists of strings,
            with one entry for each column.

            .columns must have been set using .set_columns() or by
            processing a given CSV header.

        """
        table = {}
        lines = self.lines
        keys = self.columns
        if keys is None:
            raise Error,'no columns set'
        rows = len(lines)
        for k in keys:
            table[k] = [None] * rows
        for i, key in Tools.irange(keys):
            column = table[key]
            for j, row in Tools.irange(lines):
                if len(row) > i:
                    column[j] = row[i]
        return table

    def objects(self,constructor):

        """ Builds a list of objects by calling the given constructor
            with keywords defined by mapping column names to values for
            each input line.

            .columns must have been set using .set_columns() or by
            processing a given CSV header.

        """
        lines = self.lines
        keys = self.columns
        if keys is None:
            raise Error,'no columns set'
        objs = [None] * len(lines)
        for i,line in Tools.irange(lines):
            kws = dict(Tools.tuples(keys, line))
            objs[i] = apply(constructor,(),kws)
        return objs

    def process_header(self):

        """ Process the header data.

            This also sets the .columns attribute. The header is
            removed from the data in .lines after having been
            processed.

            The header is passed through .filter_header() before
            interpreting it as list of column names.

        """
        lines = self.lines
        if len(lines) < 1:
            raise Error,'missing header data'
        self.columns = self.filter_header(lines[0])
        del lines[0]

    def set_columns(self, columns):

        """ Sets the column names to use.

            This overrides any column names possibly given in the read
            data.

        """
        self.columns = columns

    def filter_header(self, header,

                      lower=TextTools.lower):

        """ Filter the given header line.

            The base class converts the column names to all lowercase
            and removes any whitespace included in the header.

            This method is only called in case the header was read
            from the data provided to the object.

        """
        l = [''] * len(header)
        for i,column in Tools.irange(header):
            l[i] = ''.join(lower(column).split())
        return l

    def description(self, header=1):

        """ Return a list of tuples (column name, max length) found in the
            data. 

            If header is true (default), the column names themselves
            are included in the calculation.

        """
        lines = self.lines
        columns = self.columns
        width = len(columns)
        if header:
            lengths = []
            for column in columns:
                lengths.append(len(column))
        else:
            lengths = [0] * width
        for row in self.lines:
            for i,o in Tools.irange(row[:width]):
                if len(o) > lengths[i]:
                    lengths[i] = len(o)
        return map(None,columns,lengths)

    def _decode(self,text):

        """ Decode the CSV data in text.

            Internal method. Do not use directly.
        
        """
        lines = []
        x = 0
        length = len(text)
        width = 0
        errorlines = []
        match = self.parseitem.match
        separators = self.separators
        while x < length:
            l = []
            while x <= length:
                # Find next token
                m = match(text,x)
                if not m:
                    # End of line
                    break
                y = m.regs[0][1]
                l.append(text[x:y])
                if _debug:
                    print x,repr(l[-1])
                x = y + 1
                if x > length:
                    break
                # Check validity
                if text[y:x] not in separators:
                    if text[y:y+2] == '\015\012':
                        # Handle CRLF
                        x = y + 2
                    elif text[y:x] not in '\015\012':
                        # Syntax error: missing ',' or ';'
                        # Action: skip to end of line
                        y = text.find('\012', x)
                        if y < 0:
                            y = text.find('\015', x)
                        if y < 0:
                            # Skip to end of text
                            x = length
                        else:
                            x = y + 1
                        if _debug:
                            print 'errors in',l,x
                        errorlines.append(len(lines))
                    # else: found single CR or LF
                    break
            if len(l) > width:
                width = len(l)
            if _debug:
                print 'adding',l,x,repr(text[x:x+5])
            lines.append(l)
        self.width = width
        if errorlines:
            self.errors = 1
            self.errorlines = errorlines
        return map(self._unquote,lines)

    def _unquote(self,line):

        """ Unquote a CSV style quoted line of text.

            Internal method. Do not use directly.
        
        """
        for i,text in Tools.irange(line):
            if text[:1] == '"' and text[-1:] == '"':
                text = text[1:-1]
            line[i] = text.replace('""','"')
        return line

    def __len__(self):

        return len(self.lines)

    def __str__(self):

        lines = self.list()
        desc = self.description()
        width = 0
        output = []
        write = output.append
        for col in desc:
            write('%-*s|' % (col[1],col[0]))
        write('\n')
        for col in desc:
            write('=' * col[1] + '+')
        write('\n')
        for line in lines:
            for i,item in Tools.irange(line):
                write('%-*s|' % (desc[i][1],item))
            write('\n')
        return ''.join(output)

### Writer base class

class Writer:

    # Column names
    columns = None

    # CSV text
    text = ''

    # Separator to use for separating fields of a row (default: comma)
    separator = ','

    # End-of-line marker to use (default: CRLF)
    lineend = '\015\012'

    def __init__(self, separator=None, lineend=None):

        if separator is not None:
            self.separator = separator
        if lineend is not None:
            self.lineend = lineend

    def flush(self):

        """ Flush the data currently stored in the writer.
        """
        self.text = ''
        self.columns = None

    def set_columns(self, columns, header=1):

        """ Sets the output columns.

            If header is true, a column name line is added to the
            output.

            Columns can only be set once per session and must be set
            prior to adding any data. columns has to be a list of
            column names.

            It is assured that no more than len(columns) items are
            written for each row. All rows are filled up with ""
            entries to have an equal number of items.

        """
        if columns == self.columns and not header:
            # Nothing to do
            return
        elif self.columns:
            raise Error,'cannot write columns more than once per session'
        self.columns = columns
        if header:
            if self.text:
                raise Error,'cannot add header to already written data'
            headerline = self._quote(columns)
            self.text = self.separator.join(headerline) + self.lineend

    def feed_list(self,table):

        """ Feeds a table (list of rows) which is converted
            to CSV. 

            No more than len(columns) items are written for each
            row. All rows are filled up with "" entries to have an
            equal number of items. None entries are converted to empty
            strings, all other objects are stringified.

        """
        columns = self.columns
        if columns:
            rowlen = len(columns)
        else:
            # Calculate the max. number of columns in the table
            rowlen = max(map(len,table))

        # Prepare an empty table
        t = [None] * len(table)
        _quote = self._quote

        # Fill in data
        for i,row in Tools.irange(table):
            row = _quote(row[:rowlen])
            if len(row) < rowlen:
                row[len(row):] = ['""'] * (rowlen - len(row))
            t[i] = self.separator.join(row)

        # Add final CRLF and add as CSV text
        t.append('')
        self.text = self.text + self.lineend.join(t)

    def feed_dict(self,table,rows=None):

        """ Feeds a table (dict of lists) which is converted
            to CSV. 

            Only the keys set as column names are used to form the CSV
            data.

            All lists in the dictionary must have equal length or, if
            rows is given, at least that many entries. None entries
            are converted to empty strings; all other objects are
            stringified.

        """
        columns = self.columns
        if not columns:
            raise Error,'no output columns set'
        rowlen = len(columns)

        # Create an empty table
        if not rows:
            rows = 0
            for column in columns:
                nrows = len(table[column])
                if nrows > rows:
                    rows = nrows
        rowindices = Tools.trange(rows)
        t = [None] * rows
        for i in rowindices:
            t[i] = [None] * rowlen
            
        # Fill the table
        for j,k in Tools.irange(columns):
            for i in rowindices:
                t[i][j] = table[k][i]
                
        # Quote and join lines
        t = [self.separator.join(self._quote(x)) for x in t]

        # Add final CRLF and store CSV text
        t.append('')
        self.text = self.text + self.lineend.join(t)

    def feed_objects(self,objects,

                     getattr=getattr):

        """ Feeds a sequence of objects which is converted to CSV. 

            For each object the set column names are interpreted as
            object attributes and used as basis for the CSV data.

            None values are converted to empty strings, all other
            attributes are added stringified.

        """
        columns = self.columns
        if not columns:
            raise Error,'no output columns set'
        rowlen = len(columns)

        # Create an empty table
        rows = len(objects)
        rowindices = Tools.trange(rows)
        t = [None] * rows
        for i in rowindices:
            t[i] = [None] * rowlen
            
        # Fill the table
        icols = Tools.irange(columns)
        for i in rowindices:
            obj = objects[i]
            for j,name in icols:
                t[i][j] = str(getattr(obj, name))
                
        # Quote and join lines
        t = [self.separator.join(self._quote(x)) for x in t]

        # Add final CRLF and store CSV text
        t.append('')
        self.text = self.text + self.lineend.join(t)

    def dumps(self):

        """ Returns the data as CSV text
        """
        return self.text

    def dump(self, file,

             StringType=types.StringType):

        """ Write the converted CSV data to a file
        """
        if type(file) == StringType:
            file = open(file,'wb')
            file.write(self.text)
            file.close()
        else:
            file.write(self.text)

    def _quote(self, line,

               str=str):

        """ CSV style quote the given line of text.
        """
        nline = ['""'] * len(line)
        for i,item in Tools.irange(line):
            if item is not None:
                text = str(item)
            else:
                text = ''
            nline[i] = '"%s"' % text.replace('"','""')
        return nline

def _test():
    import sys
    global _debug

    # Turn on debugging output
    #_debug = 1

    s = """"key1","key2",key3,key4
"abc",0,,0,"def""ghi
Eine neue Zeile
0","hallo",
"line",with"errors,
"text with,embedded,commas",2,3,4
newline,new,luck,;123,""\015
\"""Quote""\",1,2,3,"""#"

    r = Reader()
    r.loads(s)
    lt = r.list()
    print lt
    print r.dictionary()
    r.set_columns(('Name','Vorname'))
    print r.dictionary()
    print r.description()
    print '-' * 72


    w = Writer()
    w.feed_list(r.list())
    w.feed_list(r.list())
    s = w.dumps()
    print s

    #w.dump('test.txt')
    w.flush()
    dict = r.dictionary()
    w.set_columns(dict.keys())
    w.feed_dict(dict)
    s = w.dumps()
    print '-->',s

    r.flush()
    r.loads(s)
    lt = r.list()
    print lt
    print r.dictionary()
    r.set_columns(('Name','Vorname'))
    print r.dictionary()
    print r.description()
    print '-' * 72

    class Obj:
        pass
    data = []
    for i in range(10):
        o = Obj()
        o.id = i
        o.str = str(i)
        data.append(o)
    w.flush()
    w.set_columns(('id', 'str'))
    w.feed_objects(data)
    w.feed_objects(data)
    s = w.dumps()
    print '-->',s
    r.flush()
    r.loads(s)
    lt = r.list()
    print lt
    print r.dictionary()
    r.set_columns(('id','str'))
    print r.dictionary()
    print r.description()
    print '-' * 72


    print
    print 'Read File:',sys.argv[1]
    r.flush()
    f = open(sys.argv[1])
    r.load(f)
    print r

if __name__ == '__main__':
    _test()