This file is indexed.

/usr/share/pyshared/Eikazo/Processor.py is in eikazo 0.5.2-8.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

"""
Copyright (c) Abel Deuring 2006 <adeuring@gmx.net>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.


Processors are classes that implement some sort of processing:
  - scanning itself is done by a processor
  - saving to a file
  - printing
  - displaying scan data in a widget
  - image manipulation: deskewing, gray -> bi-level, addition of an
    ICC profile etc etc

- Every processor has exactly one input, connected to exactly one
  input producer
- processors may produce output for other processors. In this case,
  their output may be fed to more than one processor. (example
  application: simultaneous printing and saving of scans)

- Every processor takes a class Scanjob instance as its input
- Error handling: If an error occurs, processors may "reject"
  a job. In this case, the job data must not be modified, and the
  job is passed back to the input processor
  
  This file contains mostly interface definitions
  
  All processors must be "threading-aware", i.e., they must maintain
  a state; all methods may be called from another thread.

  Trivial processors can simply do all processing in their append()
  method.
"""  

import sys, time, traceback, weakref
from SaneError import SaneError
import threading, SaneThread
import gobject
import I18n
DEBUG = 1

t = I18n.get_translation('eikazo')
if t:
    _ = t.gettext
else:
    _ = lambda x: x

# "serial number" for jobs.
_jobid = 0
# paranoia check: which attributes may be set for a SaneScanJob
# instance. This ensures that attributes are not lost in SaneScanJob.copy
#
# attributes we don't want:
# 'error'
# attributes that need a "deep copy"
_deepcopyattr = ('img', )
# attributes that may be copied using sequence operators
_seqattr = ('scanwindow',)
# attributes that are copied by value or reference, in the constructor call
_refattr = ('orig_id', 'copies', 'owner')
# attributes that may exist and that can be copied by value:
_simpleattr = ('resolution', 'y_resolution', 'duplex_status_backside')
# unique/independent attributes
_uniqattr = ('id', 'status', 'active', 'deleted')
_jobattr = _deepcopyattr + _seqattr + _refattr + _simpleattr + _uniqattr
_joblist = []
class SaneScanJob:
    def __init__(self, owner, orig_id=None, copies=None):
        """ container for job data. Initially quite dumb,
            but instances of this class will become "better populated"
            with attributes later on. The scan processor for example
            adds the image data and scan parameters
        """
        # status: a dict where processors can put display
        # information about the scan status
        global _jobid
        self.id = _jobid
        _jobid += 1
        self.owner = owner
        
        self.orig_id = orig_id or self.id
        
        self.status = {}
        self.active = False
        if copies is None:
            self.copies = []
        else:
            self.copies = copies
        self.copies.append(weakref.ref(self))
        self.deleted = False
        add_joblist(self)
    
    def copy(self):
        """return a copy of self. called by processsors
           which have more than one output.
        """ 
        res = SaneScanJob(self.owner, self.orig_id, self.copies)
        
        # check that we know about all attributes
        test = self.__dict__.keys()
        test = [x for x in test if not x in _jobattr]
        if test:
            raise SaneError("SaneScanJob.copy: unexpected attributes: %s" % \
                            repr(test))
        if hasattr(self, 'img'):
            res.img = self.img.copy()
        
        for name in _seqattr:
            if hasattr(self, name):
                setattr(res, name, getattr(self, name)[:])
        for name in _simpleattr:
            if hasattr(self, name):
                setattr(res, name, getattr(self, name))
        
        return res
    
    def has_error(self):
        return hasattr(self, 'error')
    
    def set_active(self, v):
        self.active = v
    
    def is_active(self):
        return self.active



def add_joblist(job):
    for i in xrange(len(_joblist)-1, -1, -1):
        test = _joblist[i]()
        if test is None:
            _joblist.pop(i)
    _joblist.append(weakref.ref(job))

def mark_deleted(min_id):
    for i in xrange(len(_joblist)-1, -1, -1):
        job = _joblist[i]()
        if job is None:
            _joblist.pop(i)
        elif job.id >= min_id:
            job.deleted += 1

class SaneInputProducer:
    def __init__(self):
        pass
    
    def next_job(self):
    	""" called by the processor, when it has finished a job and
    	    is ready to accept a new job.
    	"""
    	raise SaneError("SaneInputProducer.next_job must be overloaded")


class SaneProcessorNotifyHub(gobject.GObject):
    """ We have several more or less independently working processors,
        without any own display, but these processors must be able
        to notify a display widget about a scan job.
        
        This class provides a "signal emitter", to which display
        widgets can connect.
        
        The signal name is "sane-jobinfo"
        
        We must also be aware of threading, hence this class
        uses the thread-safe function gobject.idle_add to emit
        a signal, instead of emitting it directly.
    """
    def __init__(self):
        gobject.GObject.__init__(self)
    
    def notify(self, msg, job, proc):
        gobject.idle_add(_Notify(self, msg, job, proc))
    

class _Notify:
    def __init__(self, hub, msg, job, proc):
        self.hub = hub
        self.msg = msg
        self.job = job
        self.proc = proc

    def __call__(self):
        self.hub.emit('sane-jobinfo', self.msg, self.job, self.proc)

# parameters:
# 1: info type, string, from _displayaction.keys()
# 2: SaneScanJob instance
# 3: processor emitting the signal
gobject.signal_new('sane-jobinfo', SaneProcessorNotifyHub,
                   gobject.SIGNAL_RUN_FIRST | gobject.SIGNAL_ACTION,
                   gobject.TYPE_NONE,
                   (gobject.TYPE_PYOBJECT,
                    gobject.TYPE_PYOBJECT,
                    gobject.TYPE_PYOBJECT, ))
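
# Usage sketch (comments only; the handler below is a made-up example):
# a display widget subscribes to the hub like to any other gobject signal.
#
#   hub = SaneProcessorNotifyHub()
#   def on_jobinfo(hub, msg, job, proc):
#       print "job %s: %s" % (job.id, msg)
#   hub.connect('sane-jobinfo', on_jobinfo)
#
# Processors call self.notify(msg, job); the hub defers the actual signal
# emission to the main loop via gobject.idle_add, so that it is safe to
# notify from a worker thread.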


class SaneProcessor(gobject.GObject):
    def __init__(self, input_producer, notify_hub):
        """ input_producer: an instance of class InputProducer
            abstract base class
        """
        gobject.GObject.__init__(self)
        self.input_producer = input_producer
        self.output = []
        self.notify_hub = notify_hub
        self.errorjobs = []
        
    def append(self, job):
        """ called by the input processor to append a new job.
            Raises an exception if the job cannot be appended.
        """
        raise SaneError("SaneProcessor.append must be overloaded")
    
    def can_append(self, job):
        """ return True if jobs can be appended, else False
        """
        raise SaneError("SaneProcessor.can_append must be overloaded")
    
    def numjobs(self, cascade):
        """ return the number of jobs queued in the instance.
            If cascade is true, add the number of all jobs in 
            following processors
        """
    	raise SaneError("SaneProcessor.jobs must be overloaded")
    
    def delete_job(self, job):
        """ delete a queued job, and all job which a farther 
            back in the queued (i.e., those with larger IDs)
        """
        raise SaneError("SaneProcessor.delete_job must be overloaded")
    
    def delete_from_id(self, id):
        """ delete all queued jobs with a job ID >= id
        """
        raise SaneError("SaneProcessor.delete_from_id must be overloaded")

    def add_output(self, processor):
        """ add an output instance. processor is a SaneProcessor instance
        """
        self.output.append(processor)
    
    def remove_output(self, processor):
        """ remove a processor from the list of outputs. If the processor
            is not listed in the output list, silently ignore it
        """
        for i in range(len(self.output)-1, -1, -1):
            if self.output[i] == processor:
                self.output.pop(i)
    
    def get_output(self):
        """ return a copy of the list of outputs
        """
        return self.output[:]
        
    def set_notify_hub(self, notify_hub):
        self.notify_hub = notify_hub
    
    def notify(self, msg, job):
        if self.notify_hub:
            self.notify_hub.notify(msg, job, self)
    
    def can_retry(self, job):
        """ check, if a job in error status can be re-queued
            Should be overloaded by derived classes
        """
        return False

    def retry_job(self, job):
        """ retry a job. If successful, return True, else False
        """
        return False
    
    def can_edit(self, job):
        """ check, if a job in error status can be edited
            Should be overloaded by derived classes
        """
        return False

    def edit_job(self, job):
        """ edit a job. If successful, return True, else False
        """
        return False
    
    def set_input(self, input):
        if self.input_producer:
            self.input_producer.remove_output(self)
        self.input_producer = input
        if input:
            input.add_output(self)

    def send_toOutput(self, job):
        """ send the job to all defined outputs
        """
        raise SaneError("SaneProcessor.send_toOutput must be overloaded")
    	
class SaneQueueingProcessor(SaneProcessor):
    """ variant of SaneProcessor which implements "real" queueing
    """
    def __init__(self, input_producer, notify_hub, queue_length):
        SaneProcessor.__init__(self, input_producer, notify_hub)
        self.queue = []
        self.queue_length = queue_length
    
    def append(self, job):
        if self.can_append(job):
            self.queue.append(job)
            job.owner = self
            return
        raise SaneError("SaneQueueingProcessor.append: queue full")
    
    def can_append(self, job):
        return len(self.queue) < self.queue_length
    
    def numjobs(self, cascade):
        res = len(self.queue)
        if cascade:
            for o in self.output:
                res += o.numjobs(True)
        return res

    def send_toOutput(self, job):
        res = True
        if self.output:
            olist = self.output[:]
            # try for 60 seconds to queue the job
            for i in xrange(1200):
                if job.deleted:
                    self.notify('removed', job)
                    res = False
                    olist = []
                    break
                for j in xrange(len(olist)-1, -1, -1):
                    o = olist[j]
                    if o.can_append(job):
                        if len(olist) > 1:
                            newjob = job.copy()
                            self.notify('new job', newjob)
                            o.append(newjob)
                            # appending can fail. Example: no output enabled.
                            # Then we must notify about the job deletion here
                            if newjob.owner == self:
                                self.notify('removed', newjob)
                        else:
                            o.append(job)
                        olist.pop(j)
                if not olist:
                    break
                time.sleep(0.05)
            if olist:
                raise SaneError('output queue(s) blocked')
        return res
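
# Illustrative example added for documentation (not part of the original
# eikazo processor set): a minimal "trivial" processor as described in
# the module docstring, doing all of its work directly in append().
class ExampleTagProcessor(SaneQueueingProcessor):
    """ example processor: tags each job and passes it on unchanged """
    def __init__(self, input_producer, notify_hub):
        # queue length 1, so that can_append() is True while no job
        # is being processed
        SaneQueueingProcessor.__init__(self, input_producer, notify_hub, 1)
    
    def append(self, job):
        # take ownership, as the queueing append() would
        job.owner = self
        # trivial processing directly in append(), as permitted by the
        # module docstring
        job.status['example'] = _('tagged')
        self.notify('status changed', job)
        # forward to all outputs; send_toOutput() copies the job if
        # more than one output is connected
        self.send_toOutput(job)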
    

class SaneThreadingQueueingProcessor(SaneQueueingProcessor, SaneThread.Thread):
    """ SaneQueueingProcessor with threading support. The thread
        NOT automatically started!
    """
    def __init__(self, input_producer, notify_hub, queue_length):
        SaneQueueingProcessor.__init__(self, input_producer, notify_hub,
                                       queue_length)
        SaneThread.Thread.__init__(self)
        self.queuelock = threading.RLock()
    
    def append(self, job, blocking=1):
        if self.queuelock.acquire(blocking):
            try:
                SaneQueueingProcessor.append(self, job)
            finally:
                self.queuelock.release()
    
    def can_append(self, job):
        return (len(self.queue) < self.queue_length)

    def delete_from_id(self, id):
        # delete all jobs with a job ID >= id
        # Start with the largest ID
        self.input_producer.delete_from_id(id)
        self.queuelock.acquire()
        queue = self.queue
        dellist = [(x, queue) for x in queue if x.id >= id]
        errlist = self.errorjobs
        dellist += [(x, errlist) for x in errlist if x.id >= id]
        dellist.sort(lambda x,y: cmp(y[0].id, x[0].id))
        for j,l in dellist:
            i = l.index(j)
            l.pop(i)
            self.notify('removed', j)
        self.queuelock.release()
    
    def delete_job(self, job):
        for j in job.copies:
            j = j()
            if j is not None:
                mark_deleted(j.id)
                j.owner.delete_from_id(j.orig_id)

    def numjobs(self, cascade):
        self.queuelock.acquire()
        res = len(self.queue) + len(self.errorjobs)
        self.queuelock.release()
        if cascade:
            for o in self.output:
                res += o.numjobs(True)
        return res
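
# Locking sketch (comments only): concrete subclasses access the queue
# under self.queuelock, typically from their run() method, e.g.:
#
#   self.queuelock.acquire()
#   try:
#       if self.queue:
#           job = self.queue.pop(0)
#   finally:
#       self.queuelock.release()
#
# The thread itself must be started explicitly with self.start()
# (see SaneScannerControl below).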
    


class SaneScannerControl(SaneThreadingQueueingProcessor, SaneInputProducer):
    def __init__(self, device, input_producer, notify_hub, queue_length):
        """ device: gtkWidgets.SaneDevice instance
        """
        SaneThreadingQueueingProcessor.__init__(self, input_producer, 
                                                notify_hub, queue_length)
        SaneInputProducer.__init__(self)
        self.device = device
        self.output = []
        self.errorjobs = []
        self.start()
        self.status = 0 # idle
        # track the "duplex status": We must know, if the next
        # scan will be a backside duplex scan
        self.duplex_scanner_status_backside = False
        # ... and if the next queued job will be a backside scan
        self.duplex_input_backside = False
        # debugging: check, if restarting a backside scan in duplex
        # mode works. Set to True for to force an error
        self.TEST = False
    
    def can_append(self, job):
        return SaneThreadingQueueingProcessor.can_append(self, job) and not self.errorjobs
    
    def append(self, job):
        job.duplex_status_backside = self.duplex_input_backside
        SaneThreadingQueueingProcessor.append(self, job)
        if self.device.duplex_mode():
            self.duplex_input_backside = not self.duplex_input_backside
        job.status['scan'] = _('waiting for scan')
        self.notify('new job', job)
    
    def reset_duplex(self, input):
        """ reset the duplex status. If input is True, reset both
            duplex_input_backside and duplex_scanner_status_backside,
            else only duplex_scanner_status_backside
        """
        self.duplex_scanner_status_backside = False
        if input:
            self.duplex_input_backside = False
    
    def run(self):
        while not self.abort:
            if len(self.queue) and not self.errorjobs:
                if DEBUG:
                    print "starting scan", self.queue[0].id
                self.queuelock.acquire()
                job = self.queue.pop(0)
                self.status = 1 # scanning
                self.queuelock.release()
                job.set_active(True)
                try:
                    job.status['scan'] = _('scanning')
                    self.notify('status changed', job)
                    # collect relevant scan information
                    try:
                        job.scanwindow = (self.device.tl_x, self.device.br_x,
                                          self.device.tl_y, self.device.br_y)
                    except KeyError:
                        # for backends that do not provide tl_x, tl_y etc
                        xmax, ymax = self.device._device.get_parameters()[2]
                        job.scanwindow = (0, xmax, 0, ymax)
                    try:
                        job.resolution = self.device._device.resolution
                    except AttributeError:
                        # Insert a fake value... The resolution is required
                        # in several output and postprocessing plugins; 
                        # especially for printing, it is more or less required,
                        # unless the option to properly scale the print output
                        # is dropped.
                        # Similarly, the UI for some postprocessing plugins
                        # assumes a defined resolution, and optionally
                        # ignoring it would make the UI creation a bit
                        # complicated.
                        # FIXME: add a warning about "faked" resolution
                        # somewhere?
                        # FIXME: allow the resolution to be defined via
                        # os.getenv or some config option?
                        job.resolution = 72
                    if 'y_resolution' in self.device.getOptionNames():
                        job.y_resolution = self.device._device.y_resolution
                    else:
                        job.y_resolution = job.resolution
                    
                    if self.TEST and self.duplex_scanner_status_backside:
                        # xxx test: force an error to see if requeueing
                        # a backside job works as expected
                        self.TEST = False
                        raise SaneError("duplex requeueing test")
                        
                    # paranoia: make sure that the duplex status of the scanner
                    # stays synchronous with the status as "thought of" by this
                    # class. Unfortunately, the Sane standard has no way to
                    # tell, for duplex scanners, whether the next start()/snap()
                    # calls will deliver front side or back side data, nor does
                    # any backend for duplex scanners provide an option that
                    # would allow querying the actual scanner status.
                    # So let's "reset" the backend before each frontside
                    # scan. A sane_cancel flushes possibly buffered
                    # "back side data".
                    if not self.duplex_scanner_status_backside:
                        self.device._device.cancel()
                    
                        # now we may have the (admittedly unlikely) situation
                        # that a job for backside data is requeued, without the
                        # corresponding front side job being requeued. Not all
                        # backends for duplex scanners support backside-only
                        # scans, so we start the frontside scan too, but omit 
                        # the data
                        if job.duplex_status_backside:
                            print "dropping front side data"
                            self.device._device.start()
                            self.duplex_scanner_status_backside = \
                                not self.duplex_scanner_status_backside
                            # no check whether no_cancel is supported:
                            # duplex scans are reasonable only with PIL.sane
                            # versions that DO support this mode.
                            junk = self.device._device.snap(no_cancel=1)
                            junk.save('TEST.tif')
                    
                    # we must call this before sane_start, because
                    # not all backends allow reading an option AFTER
                    # a scan has been started.
                    in_duplex_mode = self.device.duplex_mode()

                    self.device._device.start()
                    scanparms = self.device._device.get_parameters()

                    if in_duplex_mode:
                        self.duplex_scanner_status_backside = \
                            not self.duplex_scanner_status_backside
                    # FIXME: terrible workaround...
                    # We want to use the no_cancel option, but it is
                    # not available everywhere: it is a quite recent bugfix
                    # -> "enforce" usage of a sufficiently recent version
                    # of the sane module??
                    try:
                        img = self.device._device.snap(no_cancel=1)
                    except TypeError, val:
                        if str(val) == "snap() got an unexpected keyword argument 'no_cancel'":
                            img = self.device._device.snap()
                        else:
                            raise
                    # the sane module delivers a gray scale image even for
                    # lineart scans. 
                    if scanparms[0] == 'L' and scanparms[3] == 1:
                        img = img.convert('1')
                    job.img = img
                    job.status['scan'] = _("scanned")
                    self.notify('status changed', job)
                    queue_ok = self.send_toOutput(job)
                    if job.owner == self:
                        self.notify('removed', job)

                    if self.can_append(None) and queue_ok:
                        self.input_producer.next_job()
                        
                except:
                    job.error = sys.exc_info()
                    job.set_active(False)
                    job.status['scan'] = _('scan error')
                    self.notify('status changed', job)
                    self.queuelock.acquire()
                    self.errorjobs.append(job)
                    self.queuelock.release()
                    if DEBUG:
                        print str(job.error[0]), str(job.error[1])
                        traceback.print_tb(job.error[2])
                self.status = 0 # idle
                if DEBUG:
                    print "scan finished", job.id
            else:
                time.sleep(0.1)

    def numjobs(self, cascade):
        self.queuelock.acquire()
        res = len(self.queue) + len(self.errorjobs)
        if self.status != 0:
            res += 1
        self.queuelock.release()
        if cascade:
            for o in self.output:
                res += o.numjobs(True)
        return res
    
    def retry_job(self, job):
        res = False
        self.queuelock.acquire()
        for i in xrange(len(self.errorjobs)):
            if job == self.errorjobs[i]:
                self.errorjobs.pop(i)
                del job.error 
                # scan jobs should be processed in the sequence of their
                # job ids
                self.queue.append(job)
                self.queue.sort(lambda x,y: cmp(x.id, y.id))
                res = True
                break
        self.queuelock.release()
        self.reset_duplex(False)
        return res
    
    def delete_job(self, job):
        # reset the duplex status. Otherwise, the duplex logic will
        # get out of sync
        self.reset_duplex(True)
        SaneThreadingQueueingProcessor.delete_job(self, job)
    
    def can_retry(self, job):
        """ check, if a job in error status can be re-queued
            Should be overloaded by derived classes
        """
        return job in self.errorjobs
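
# Error-handling sketch (comments only): a UI that is notified about a
# failed job ('scan error' in job.status) can offer a retry:
#
#   if control.can_retry(job):
#       if control.retry_job(job):
#           # the job is back in the queue, sorted by job id, and the
#           # scanner-side duplex bookkeeping has been reset
#           pass
#       else:
#           control.delete_job(job)   # give up: drop the job everywhere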