This file is indexed.

/usr/lib/python2.7/dist-packages/celery/events/state.py is in python-celery 3.1.20-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.
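
The module's State class can be fed events captured from a running app. A
minimal sketch of that pattern (the broker URL is a placeholder; capture
limits and handler wiring follow the usual monitoring recipe):

    from celery import Celery
    from celery.events.state import State

    app = Celery(broker='amqp://guest@localhost//')  # placeholder broker
    state = State()

    with app.connection() as connection:
        # Route every event type to State.event, which updates the
        # in-memory picture of the cluster.
        recv = app.events.Receiver(connection, handlers={'*': state.event})
        recv.capture(limit=10)  # stop after ten events

    print('events: %s tasks: %s' % (state.event_count, state.task_count))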

# -*- coding: utf-8 -*-
"""
    celery.events.state
    ~~~~~~~~~~~~~~~~~~~

    This module implements a data structure used to keep
    track of the state of a cluster of workers and the tasks
    it is working on (by consuming events).

    For every event consumed the state is updated,
    so the state represents the state of the cluster
    at the time of the last event.

    Snapshots (:mod:`celery.events.snapshot`) can be used to
    take "pictures" of this state at regular intervals
    to e.g. store it in a database.

"""
from __future__ import absolute_import

import bisect
import sys
import threading

from datetime import datetime
from decimal import Decimal
from itertools import islice
from operator import itemgetter
from time import time
from weakref import ref

from kombu.clocks import timetuple
from kombu.utils import cached_property, kwdict

from celery import states
from celery.five import class_property, items, values
from celery.utils import deprecated
from celery.utils.functional import LRUCache, memoize
from celery.utils.log import get_logger

PYPY = hasattr(sys, 'pypy_version_info')

# The window (in percentage) is added to the worker's heartbeat
# frequency.  If the time between updates exceeds this window,
# then the worker is considered to be offline.
HEARTBEAT_EXPIRE_WINDOW = 200

# Max drift between event timestamp and the time the event was received
# before we alert that clocks may be unsynchronized.
HEARTBEAT_DRIFT_MAX = 16

DRIFT_WARNING = """\
Substantial drift from %s may mean clocks are out of sync.  Current drift is
%s seconds.  [orig: %s recv: %s]
"""

CAN_KWDICT = sys.version_info >= (2, 6, 5)

logger = get_logger(__name__)
warn = logger.warning

R_STATE = '<State: events={0.event_count} tasks={0.task_count}>'
R_WORKER = '<Worker: {0.hostname} ({0.status_string} clock:{0.clock})>'
R_TASK = '<Task: {0.name}({0.uuid}) {0.state} clock:{0.clock}>'

__all__ = ['Worker', 'Task', 'State', 'heartbeat_expires']


@memoize(maxsize=1000, keyfun=lambda a, _: a[0])
def _warn_drift(hostname, drift, local_received, timestamp):
    # we use memoize here so the warning is only logged once per hostname
    warn(DRIFT_WARNING, hostname, drift,
         datetime.fromtimestamp(local_received),
         datetime.fromtimestamp(timestamp))


def heartbeat_expires(timestamp, freq=60,
                      expire_window=HEARTBEAT_EXPIRE_WINDOW,
                      Decimal=Decimal, float=float, isinstance=isinstance):
    # some JSON implementations return decimal.Decimal objects,
    # which are not compatible with float.
    freq = float(freq) if isinstance(freq, Decimal) else freq
    if isinstance(timestamp, Decimal):
        timestamp = float(timestamp)
    return timestamp + (freq * (expire_window / 1e2))
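
# Example: with the defaults above (freq=60, expire_window=200) a
# heartbeat expires 60 * (200 / 1e2) = 120 seconds after its timestamp,
# i.e. a worker is considered offline roughly two missed heartbeats
# after the last one received.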


def _depickle_task(cls, fields):
    return cls(**(fields if CAN_KWDICT else kwdict(fields)))


def with_unique_field(attr):

    def _decorate_cls(cls):

        def __eq__(this, other):
            if isinstance(other, this.__class__):
                return getattr(this, attr) == getattr(other, attr)
            return NotImplemented
        cls.__eq__ = __eq__

        def __ne__(this, other):
            return not this.__eq__(other)
        cls.__ne__ = __ne__

        def __hash__(this):
            return hash(getattr(this, attr))
        cls.__hash__ = __hash__

        return cls
    return _decorate_cls
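
# Example: after @with_unique_field('hostname') is applied below, two
# Worker instances compare equal (and hash identically) whenever their
# hostname matches, regardless of any other attribute.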


@with_unique_field('hostname')
class Worker(object):
    """Worker State."""
    heartbeat_max = 4
    expire_window = HEARTBEAT_EXPIRE_WINDOW

    _fields = ('hostname', 'pid', 'freq', 'heartbeats', 'clock',
               'active', 'processed', 'loadavg', 'sw_ident',
               'sw_ver', 'sw_sys')
    if not PYPY:
        __slots__ = _fields + ('event', '__dict__', '__weakref__')

    def __init__(self, hostname=None, pid=None, freq=60,
                 heartbeats=None, clock=0, active=None, processed=None,
                 loadavg=None, sw_ident=None, sw_ver=None, sw_sys=None):
        self.hostname = hostname
        self.pid = pid
        self.freq = freq
        self.heartbeats = [] if heartbeats is None else heartbeats
        self.clock = clock or 0
        self.active = active
        self.processed = processed
        self.loadavg = loadavg
        self.sw_ident = sw_ident
        self.sw_ver = sw_ver
        self.sw_sys = sw_sys
        self.event = self._create_event_handler()

    def __reduce__(self):
        return self.__class__, (self.hostname, self.pid, self.freq,
                                self.heartbeats, self.clock, self.active,
                                self.processed, self.loadavg, self.sw_ident,
                                self.sw_ver, self.sw_sys)

    def _create_event_handler(self):
        _set = object.__setattr__
        hbmax = self.heartbeat_max
        heartbeats = self.heartbeats
        hb_pop = self.heartbeats.pop
        hb_append = self.heartbeats.append

        def event(type_, timestamp=None,
                  local_received=None, fields=None,
                  max_drift=HEARTBEAT_DRIFT_MAX, items=items, abs=abs, int=int,
                  insort=bisect.insort, len=len):
            fields = fields or {}
            for k, v in items(fields):
                _set(self, k, v)
            if type_ == 'offline':
                heartbeats[:] = []
            else:
                if not local_received or not timestamp:
                    return
                drift = abs(int(local_received) - int(timestamp))
                if drift > max_drift:
                    _warn_drift(self.hostname, drift,
                                local_received, timestamp)
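                # keep a bounded, sorted list of recent heartbeats
                # (at most heartbeat_max entries, oldest dropped first)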
                if local_received:
                    hearts = len(heartbeats)
                    if hearts > hbmax - 1:
                        hb_pop(0)
                    if hearts and local_received > heartbeats[-1]:
                        hb_append(local_received)
                    else:
                        insort(heartbeats, local_received)
        return event

    def update(self, f, **kw):
        for k, v in items(dict(f, **kw) if kw else f):
            setattr(self, k, v)

    def __repr__(self):
        return R_WORKER.format(self)

    @property
    def status_string(self):
        return 'ONLINE' if self.alive else 'OFFLINE'

    @property
    def heartbeat_expires(self):
        return heartbeat_expires(self.heartbeats[-1],
                                 self.freq, self.expire_window)

    @property
    def alive(self, nowfun=time):
        return bool(self.heartbeats and nowfun() < self.heartbeat_expires)

    @property
    def id(self):
        return '{0.hostname}.{0.pid}'.format(self)

    @deprecated(3.2, 3.3)
    def update_heartbeat(self, received, timestamp):
        self.event(None, timestamp, received)

    @deprecated(3.2, 3.3)
    def on_online(self, timestamp=None, local_received=None, **fields):
        self.event('online', timestamp, local_received, fields)

    @deprecated(3.2, 3.3)
    def on_offline(self, timestamp=None, local_received=None, **fields):
        self.event('offline', timestamp, local_received, fields)

    @deprecated(3.2, 3.3)
    def on_heartbeat(self, timestamp=None, local_received=None, **fields):
        self.event('heartbeat', timestamp, local_received, fields)

    @class_property
    def _defaults(cls):
        """Deprecated, to be removed in 3.3"""
        source = cls()
        return dict((k, getattr(source, k)) for k in cls._fields)


@with_unique_field('uuid')
class Task(object):
    """Task State."""
    name = received = sent = started = succeeded = failed = retried = \
        revoked = args = kwargs = eta = expires = retries = worker = result = \
        exception = timestamp = runtime = traceback = exchange = \
        routing_key = client = None
    state = states.PENDING
    clock = 0

    _fields = ('uuid', 'name', 'state', 'received', 'sent', 'started',
               'succeeded', 'failed', 'retried', 'revoked', 'args', 'kwargs',
               'eta', 'expires', 'retries', 'worker', 'result', 'exception',
               'timestamp', 'runtime', 'traceback', 'exchange', 'routing_key',
               'clock', 'client')
    if not PYPY:
        __slots__ = ('__dict__', '__weakref__')

    #: How to merge out of order events.
    #: Disorder is detected by logical ordering (e.g. :event:`task-received`
    #: must have happened before a :event:`task-failed` event).
    #:
    #: A merge rule consists of a state and a list of fields to keep from
    #: that state.  ``(RECEIVED, ('name', 'args'))`` means the name and args
    #: fields are always taken from the RECEIVED state, and any values for
    #: these fields received before or after are simply ignored.
    merge_rules = {states.RECEIVED: ('name', 'args', 'kwargs',
                                     'retries', 'eta', 'expires')}
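
    # Example: if a task-failed event arrives before the logically
    # earlier task-received event, only the fields listed above are
    # merged in from the late RECEIVED event; its other fields cannot
    # overwrite the newer FAILURE state.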

    #: :meth:`info` displays these fields by default.
    _info_fields = ('args', 'kwargs', 'retries', 'result', 'eta', 'runtime',
                    'expires', 'exception', 'exchange', 'routing_key')

    def __init__(self, uuid=None, **kwargs):
        self.uuid = uuid
        if kwargs:
            for k, v in items(kwargs):
                setattr(self, k, v)

    def event(self, type_, timestamp=None, local_received=None, fields=None,
              precedence=states.precedence, items=items, dict=dict,
              PENDING=states.PENDING, RECEIVED=states.RECEIVED,
              STARTED=states.STARTED, FAILURE=states.FAILURE,
              RETRY=states.RETRY, SUCCESS=states.SUCCESS,
              REVOKED=states.REVOKED):
        fields = fields or {}
        if type_ == 'sent':
            state, self.sent = PENDING, timestamp
        elif type_ == 'received':
            state, self.received = RECEIVED, timestamp
        elif type_ == 'started':
            state, self.started = STARTED, timestamp
        elif type_ == 'failed':
            state, self.failed = FAILURE, timestamp
        elif type_ == 'retried':
            state, self.retried = RETRY, timestamp
        elif type_ == 'succeeded':
            state, self.succeeded = SUCCESS, timestamp
        elif type_ == 'revoked':
            state, self.revoked = REVOKED, timestamp
        else:
            state = type_.upper()

        # note that precedence here is reversed
        # see implementation in celery.states.state.__lt__
        if state != RETRY and self.state != RETRY and \
                precedence(state) > precedence(self.state):
            # this state logically happens-before the current state, so merge.
            keep = self.merge_rules.get(state)
            if keep is not None:
                fields = dict(
                    (k, v) for k, v in items(fields) if k in keep
                )
            for key, value in items(fields):
                setattr(self, key, value)
        else:
            self.state = state
            self.timestamp = timestamp
            for key, value in items(fields):
                setattr(self, key, value)

    def info(self, fields=None, extra=()):
        """Information about this task suitable for on-screen display."""
        fields = self._info_fields if fields is None else fields

        def _keys():
            for key in list(fields) + list(extra):
                value = getattr(self, key, None)
                if value is not None:
                    yield key, value

        return dict(_keys())

    def __repr__(self):
        return R_TASK.format(self)

    def as_dict(self):
        get = object.__getattribute__
        return dict(
            (k, get(self, k)) for k in self._fields
        )

    def __reduce__(self):
        return _depickle_task, (self.__class__, self.as_dict())

    @property
    def origin(self):
        return self.client if self.worker is None else self.worker.id

    @property
    def ready(self):
        return self.state in states.READY_STATES

    @deprecated(3.2, 3.3)
    def on_sent(self, timestamp=None, **fields):
        self.event('sent', timestamp, None, fields)

    @deprecated(3.2, 3.3)
    def on_received(self, timestamp=None, **fields):
        self.event('received', timestamp, None, fields)

    @deprecated(3.2, 3.3)
    def on_started(self, timestamp=None, **fields):
        self.event('started', timestamp, None, fields)

    @deprecated(3.2, 3.3)
    def on_failed(self, timestamp=None, **fields):
        self.event('failed', timestamp, None, fields)

    @deprecated(3.2, 3.3)
    def on_retried(self, timestamp=None, **fields):
        self.event('retried', timestamp, None, fields)

    @deprecated(3.2, 3.3)
    def on_succeeded(self, timestamp=None, **fields):
        self.event('succeeded', timestamp, None, fields)

    @deprecated(3.2, 3.3)
    def on_revoked(self, timestamp=None, **fields):
        self.event('revoked', timestamp, None, fields)

    @deprecated(3.2, 3.3)
    def on_unknown_event(self, shortype, timestamp=None, **fields):
        self.event(shortype, timestamp, None, fields)

    @deprecated(3.2, 3.3)
    def update(self, state, timestamp, fields,
               _state=states.state, RETRY=states.RETRY):
        return self.event(state, timestamp, None, fields)

    @deprecated(3.2, 3.3)
    def merge(self, state, timestamp, fields):
        keep = self.merge_rules.get(state)
        if keep is not None:
            fields = dict((k, v) for k, v in items(fields) if k in keep)
        for key, value in items(fields):
            setattr(self, key, value)

    @class_property
    def _defaults(cls):
        """Deprecated, to be removed in 3.3."""
        source = cls()
        return dict((k, getattr(source, k)) for k in source._fields)


class State(object):
    """Records clusters state."""
    Worker = Worker
    Task = Task
    event_count = 0
    task_count = 0
    heap_multiplier = 4

    def __init__(self, callback=None,
                 workers=None, tasks=None, taskheap=None,
                 max_workers_in_memory=5000, max_tasks_in_memory=10000,
                 on_node_join=None, on_node_leave=None):
        self.event_callback = callback
        self.workers = (LRUCache(max_workers_in_memory)
                        if workers is None else workers)
        self.tasks = (LRUCache(max_tasks_in_memory)
                      if tasks is None else tasks)
        self._taskheap = [] if taskheap is None else taskheap
        self.max_workers_in_memory = max_workers_in_memory
        self.max_tasks_in_memory = max_tasks_in_memory
        self.on_node_join = on_node_join
        self.on_node_leave = on_node_leave
        self._mutex = threading.Lock()
        self.handlers = {}
        self._seen_types = set()
        self.rebuild_taskheap()

    @cached_property
    def _event(self):
        return self._create_dispatcher()

    def freeze_while(self, fun, *args, **kwargs):
        clear_after = kwargs.pop('clear_after', False)
        with self._mutex:
            try:
                return fun(*args, **kwargs)
            finally:
                if clear_after:
                    self._clear()

    def clear_tasks(self, ready=True):
        with self._mutex:
            return self._clear_tasks(ready)

    def _clear_tasks(self, ready=True):
        if ready:
            in_progress = dict(
                (uuid, task) for uuid, task in self.itertasks()
                if task.state not in states.READY_STATES)
            self.tasks.clear()
            self.tasks.update(in_progress)
        else:
            self.tasks.clear()
        self._taskheap[:] = []

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        with self._mutex:
            return self._clear(ready)

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname.

        Return tuple of ``(worker, was_created)``.
        """
        try:
            worker = self.workers[hostname]
            if kwargs:
                worker.update(kwargs)
            return worker, False
        except KeyError:
            worker = self.workers[hostname] = self.Worker(
                hostname, **kwargs)
            return worker, True

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid], False
        except KeyError:
            task = self.tasks[uuid] = self.Task(uuid)
            return task, True
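
    # Example use of the get_or_create pair ('state' is a State
    # instance; the hostname and uuid below are placeholders):
    #
    #     worker, created = state.get_or_create_worker('w1@example.com')
    #     task, created = state.get_or_create_task('some-task-uuid')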

    def event(self, event):
        with self._mutex:
            return self._event(event)

    def task_event(self, type_, fields):
        """Deprecated, use :meth:`event`."""
        return self._event(dict(fields, type='-'.join(['task', type_])))[0]

    def worker_event(self, type_, fields):
        """Deprecated, use :meth:`event`."""
        return self._event(dict(fields, type='-'.join(['worker', type_])))[0]

    def _create_dispatcher(self):
        get_handler = self.handlers.__getitem__
        event_callback = self.event_callback
        wfields = itemgetter('hostname', 'timestamp', 'local_received')
        tfields = itemgetter('uuid', 'hostname', 'timestamp',
                             'local_received', 'clock')
        taskheap = self._taskheap
        th_append = taskheap.append
        th_pop = taskheap.pop
        # Removing events from the task heap is an O(n) operation,
        # so it is easier to just account for the common number of
        # events per task (PENDING -> RECEIVED -> STARTED -> final).
        max_events_in_heap = self.max_tasks_in_memory * self.heap_multiplier
        add_type = self._seen_types.add
        on_node_join, on_node_leave = self.on_node_join, self.on_node_leave
        tasks, Task = self.tasks, self.Task
        workers, Worker = self.workers, self.Worker
        # avoid updating LRU entry at getitem
        get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__

        def _event(event,
                   timetuple=timetuple, KeyError=KeyError,
                   insort=bisect.insort, created=True):
            self.event_count += 1
            if event_callback:
                event_callback(self, event)
            group, _, subject = event['type'].partition('-')
            try:
                handler = get_handler(group)
            except KeyError:
                pass
            else:
                return handler(subject, event), subject

            if group == 'worker':
                try:
                    hostname, timestamp, local_received = wfields(event)
                except KeyError:
                    pass
                else:
                    is_offline = subject == 'offline'
                    try:
                        worker, created = get_worker(hostname), False
                    except KeyError:
                        if is_offline:
                            worker, created = Worker(hostname), False
                        else:
                            worker = workers[hostname] = Worker(hostname)
                    worker.event(subject, timestamp, local_received, event)
                    if on_node_join and (created or subject == 'online'):
                        on_node_join(worker)
                    if on_node_leave and is_offline:
                        on_node_leave(worker)
                        workers.pop(hostname, None)
                    return (worker, created), subject
            elif group == 'task':
                (uuid, hostname, timestamp,
                 local_received, clock) = tfields(event)
                # task-sent event is sent by client, not worker
                is_client_event = subject == 'sent'
                try:
                    task, created = get_task(uuid), False
                except KeyError:
                    task = tasks[uuid] = Task(uuid)
                if is_client_event:
                    task.client = hostname
                else:
                    try:
                        worker, created = get_worker(hostname), False
                    except KeyError:
                        worker = workers[hostname] = Worker(hostname)
                    task.worker = worker
                    if worker is not None and local_received:
                        worker.event(None, local_received, timestamp)

                origin = hostname if is_client_event else worker.id

                # remove oldest event if exceeding the limit.
                heaps = len(taskheap)
                if heaps + 1 > max_events_in_heap:
                    th_pop(0)

                # most events will be dated later than the previous.
                timetup = timetuple(clock, timestamp, origin, ref(task))
                if heaps and timetup > taskheap[-1]:
                    th_append(timetup)
                else:
                    insort(taskheap, timetup)

                if subject == 'received':
                    self.task_count += 1
                task.event(subject, timestamp, local_received, event)
                task_name = task.name
                if task_name is not None:
                    add_type(task_name)
                return (task, created), subject
        return _event

    def rebuild_taskheap(self, timetuple=timetuple):
        heap = self._taskheap[:] = [
            timetuple(t.clock, t.timestamp, t.origin, ref(t))
            for t in values(self.tasks)
        ]
        heap.sort()

    def itertasks(self, limit=None):
        for index, row in enumerate(items(self.tasks)):
            yield row
            if limit and index + 1 >= limit:
                break

    def tasks_by_time(self, limit=None):
        """Generator giving tasks ordered by time,
        in ``(uuid, Task)`` tuples."""
        seen = set()
        for evtup in islice(reversed(self._taskheap), 0, limit):
            task = evtup[3]()
            if task is not None:
                uuid = task.uuid
                if uuid not in seen:
                    yield uuid, task
                    seen.add(uuid)
    tasks_by_timestamp = tasks_by_time

    def tasks_by_type(self, name, limit=None):
        """Get all tasks by type.

        Return an iterator over ``(uuid, Task)`` tuples.

        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time()
             if task.name == name),
            0, limit,
        )
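
    # Example: iterate over the most recent tasks of one type (the task
    # name below is a placeholder):
    #
    #     for uuid, task in state.tasks_by_type('tasks.add', limit=10):
    #         print(uuid, task.state)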

    def tasks_by_worker(self, hostname, limit=None):
        """Get all tasks by worker.

        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time()
             if task.worker.hostname == hostname),
            0, limit,
        )

    def task_types(self):
        """Return a list of all seen task types."""
        return sorted(self._seen_types)

    def alive_workers(self):
        """Return a list of (seemingly) alive workers."""
        return [w for w in values(self.workers) if w.alive]

    def __repr__(self):
        return R_STATE.format(self)

    def __reduce__(self):
        return self.__class__, (
            self.event_callback, self.workers, self.tasks, None,
            self.max_workers_in_memory, self.max_tasks_in_memory,
            self.on_node_join, self.on_node_leave,
        )