/usr/include/tbb/task.h is in libtbb-dev 4.4~20151115-0ubuntu3.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

/*
    Copyright 2005-2015 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
    you can redistribute it and/or modify it under the terms of the GNU General Public License
    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is
    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    See  the GNU General Public License for more details.   You should have received a copy of
    the  GNU General Public License along with Threading Building Blocks; if not, write to the
    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA

    As a special exception,  you may use this file  as part of a free software library without
    restriction.  Specifically,  if other files instantiate templates  or use macros or inline
    functions from this file, or you compile this file and link it with other files to produce
    an executable,  this file does not by itself cause the resulting executable to be covered
    by the GNU General Public License. This exception does not however invalidate any other
    reasons why the executable file might be covered by the GNU General Public License.
*/

#ifndef __TBB_task_H
#define __TBB_task_H

#include "tbb_stddef.h"
#include "tbb_machine.h"
#include "tbb_profiling.h"
#include <climits>

typedef struct ___itt_caller *__itt_caller;

namespace tbb {

class task;
class task_list;
class task_group_context;

// MSVC does not allow taking the address of a member that was defined
// privately in task_base and made public in class task via a using declaration.
#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
#define __TBB_TASK_BASE_ACCESS public
#else
#define __TBB_TASK_BASE_ACCESS private
#endif

namespace internal { //< @cond INTERNAL

    class allocate_additional_child_of_proxy: no_assign {
        //! No longer used, but retained for binary layout compatibility.  Always NULL.
        task* self;
        task& parent;
    public:
        explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; };
} //< namespace internal @endcond

namespace interface5 {
    namespace internal {
        //! Base class for methods that became static in TBB 3.0.
        /** TBB's evolution caused the "this" argument for several methods to become obsolete.
            However, for backwards binary compatibility, the new methods need distinct names,
            otherwise the One Definition Rule would be broken.  Hence the new methods are
            defined in this private base class, and then exposed in class task via
            using declarations. */
        class task_base: tbb::internal::no_copy {
        __TBB_TASK_BASE_ACCESS:
            friend class tbb::task;

            //! Schedule task for execution when a worker becomes available.
            static void spawn( task& t );

            //! Spawn multiple tasks and clear list.
            static void spawn( task_list& list );

            //! Like allocate_child, except that task's parent becomes "t", not this.
            /** Typically used in conjunction with recycle_to_reexecute to implement while loops.
               Atomically increments the reference count of t.parent(). */
            static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
                return tbb::internal::allocate_additional_child_of_proxy(t);
            }

            //! Destroy a task.
            /** Usually, calling this method is unnecessary, because a task is
                implicitly deleted after its execute() method runs.  However,
                sometimes a task needs to be explicitly deallocated, such as
                when a root task is used as the parent in spawn_and_wait_for_all. */
            static void __TBB_EXPORTED_FUNC destroy( task& victim );
        };
    } // internal
} // interface5

//! @cond INTERNAL
namespace internal {

    class scheduler: no_copy {
    public:
        //! For internal use only
        virtual void spawn( task& first, task*& next ) = 0;

        //! For internal use only
        virtual void wait_for_all( task& parent, task* child ) = 0;

        //! For internal use only
        virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

        //! Pure virtual destructor;
        //  Present only to silence overzealous compiler warnings
        virtual ~scheduler() = 0;

        //! For internal use only
        virtual void enqueue( task& t, void* reserved ) = 0;
    };

    //! A reference count
    /** Should always be non-negative.  A signed type is used so that underflow can be detected. */
    typedef intptr_t reference_count;

    //! An id as used for specifying affinity.
    typedef unsigned short affinity_id;

#if __TBB_TASK_GROUP_CONTEXT
    class generic_scheduler;

    struct context_list_node_t {
        context_list_node_t *my_prev,
                            *my_next;
    };

    class allocate_root_with_context_proxy: no_assign {
        task_group_context& my_context;
    public:
        allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
#endif /* __TBB_TASK_GROUP_CONTEXT */

    class allocate_root_proxy: no_assign {
    public:
        static task& __TBB_EXPORTED_FUNC allocate( size_t size );
        static void __TBB_EXPORTED_FUNC free( task& );
    };

    class allocate_continuation_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    class allocate_child_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    //! Memory prefix to a task object.
    /** This class is internal to the library.
        Do not reference it directly, except within the library itself.
        Fields are ordered in a way that preserves backwards compatibility and yields
        good packing on typical 32-bit and 64-bit platforms.

        If the task prefix size exceeds 32 bytes on IA32 or 64 bytes on Intel64,
        consider setting task_alignment and task_prefix_reservation_size dynamically,
        based on the maximal operand size supported by the current CPU.

        @ingroup task_scheduling */
    class task_prefix {
    private:
        friend class tbb::task;
        friend class tbb::interface5::internal::task_base;
        friend class tbb::task_list;
        friend class internal::scheduler;
        friend class internal::allocate_root_proxy;
        friend class internal::allocate_child_proxy;
        friend class internal::allocate_continuation_proxy;
        friend class internal::allocate_additional_child_of_proxy;

#if __TBB_TASK_GROUP_CONTEXT
        //! Shared context that is used to communicate asynchronous state changes
        /** Currently it is used to broadcast cancellation requests generated both
            by users and as the result of unhandled exceptions in the task::execute()
            methods. */
        task_group_context  *context;
#endif /* __TBB_TASK_GROUP_CONTEXT */

        //! The scheduler that allocated the task, or NULL if the task is big.
        /** Small tasks are pooled by the scheduler that allocated the task.
            If a scheduler needs to free a small task allocated by another scheduler,
            it returns the task to that other scheduler.  This policy avoids
            memory space blowup issues for memory allocators that allocate from
            thread-specific pools. */
        scheduler* origin;

#if __TBB_TASK_PRIORITY
        union {
#endif /* __TBB_TASK_PRIORITY */
        //! Obsolete. The scheduler that owns the task.
        /** Retained only for the sake of backward binary compatibility.
            Still used by inline methods in the task.h header. **/
        scheduler* owner;

#if __TBB_TASK_PRIORITY
        //! Pointer to the next offloaded lower priority task.
        /** Used to maintain a list of offloaded tasks inside the scheduler. **/
        task* next_offloaded;
        };
#endif /* __TBB_TASK_PRIORITY */

        //! The task whose reference count includes me.
        /** In the "blocking style" of programming, this field points to the parent task.
            In the "continuation-passing style" of programming, this field points to the
            continuation of the parent. */
        tbb::task* parent;

        //! Reference count used for synchronization.
        /** In the "continuation-passing style" of programming, this field is
            the number of allocated children minus the number of children that
            have completed.
            In the "blocking style" of programming, this field is one more than that difference. */
        __TBB_atomic reference_count ref_count;

        //! Obsolete. Used to be scheduling depth before TBB 2.2
        /** Retained only for the sake of backward binary compatibility.
            Not used by TBB anymore. **/
        int depth;

        //! A task::state_type, stored as a byte for compactness.
        /** This state is exposed to users via method task::state(). */
        unsigned char state;

        //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
        /** 0x0 -> version 1.0 task
            0x1 -> version >=2.1 task
            0x10 -> task was enqueued
            0x20 -> task_proxy
            0x40 -> task has live ref_count
            0x80 -> a stolen task */
        unsigned char extra_state;

        affinity_id affinity;

        //! "next" field for list of task
        tbb::task* next;

        //! The task corresponding to this task_prefix.
        tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
    };

} // namespace internal
//! @endcond

#if __TBB_TASK_GROUP_CONTEXT

#if __TBB_TASK_PRIORITY
namespace internal {
    static const int priority_stride_v4 = INT_MAX / 4;
}

enum priority_t {
    priority_normal = internal::priority_stride_v4 * 2,
    priority_low = priority_normal - internal::priority_stride_v4,
    priority_high = priority_normal + internal::priority_stride_v4
};

#endif /* __TBB_TASK_PRIORITY */

#if TBB_USE_CAPTURED_EXCEPTION
    class tbb_exception;
#else
    namespace internal {
        class tbb_exception_ptr;
    }
#endif /* !TBB_USE_CAPTURED_EXCEPTION */

class task_scheduler_init;
namespace interface7 { class task_arena; }

//! Used to form groups of tasks
/** @ingroup task_scheduling
    The context services explicit cancellation requests from user code, and unhandled
    exceptions intercepted during task execution. Intercepting an exception results
    in an internal cancellation request (which is processed in exactly the
    same way as an external one).

    The context is associated with one or more root tasks and defines the cancellation
    group that includes all the descendants of the corresponding root task(s). Association
    is established when a context object is passed as an argument to the task::allocate_root()
    method. See task_group_context::task_group_context for more details.

    The context can be bound to another one, and other contexts can be bound to it,
    forming a tree-like structure: parent -> this -> children. Arrows here designate
    the direction of cancellation propagation. If a task in a cancellation group is cancelled,
    all the other tasks in this group, and in groups bound to it (as children), get cancelled too.

    IMPLEMENTATION NOTE:
    When adding new members to task_group_context or changing types of existing ones,
    update the size of both padding buffers (_leading_padding and _trailing_padding)
    appropriately. See also VERSIONING NOTE at the constructor definition below. **/
class task_group_context : internal::no_copy {
private:
    friend class internal::generic_scheduler;
    friend class task_scheduler_init;
    friend class interface7::task_arena;

#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
#else
    typedef internal::tbb_exception_ptr exception_container_type;
#endif

    enum version_traits_word_layout {
        traits_offset = 16,
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset
    };

public:
    enum kind_type {
        isolated,
        bound
    };

    enum traits_type {
        exact_exception = 0x0001ul << traits_offset,
#if __TBB_FP_CONTEXT
        fp_settings     = 0x0002ul << traits_offset,
#endif
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        default_traits = 0
#else
        default_traits = exact_exception
#endif /* !TBB_USE_CAPTURED_EXCEPTION */
    };

private:
    enum state {
        may_have_children = 1,
        // the following enumerators must be last; new 2^x values must go above
        next_state_value, low_unused_state_bit = (next_state_value-1)*2
    };

    union {
        //! Flavor of this context: bound or isolated.
        // TODO: describe asynchronous use, and whether any memory semantics are needed
        __TBB_atomic kind_type my_kind;
        uintptr_t _my_kind_aligner;
    };

    //! Pointer to the context of the parent cancellation group. NULL for isolated contexts.
    task_group_context *my_parent;

    //! Used to form the thread specific list of contexts without additional memory allocation.
    /** A context is included into the list of the current thread when its binding to
        its parent happens. Any context can be present in the list of one thread only. **/
    internal::context_list_node_t my_node;

    //! Used to set and maintain stack stitching point for Intel Performance Tools.
    __itt_caller itt_caller;

    //! Leading padding protecting accesses to frequently used members from false sharing.
    /** Read accesses to the field my_cancellation_requested are on the hot path inside
        the scheduler. This padding ensures that this field never shares the same cache
        line with a local variable that is frequently written to. **/
    char _leading_padding[internal::NFS_MaxLineSize
                          - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
                          - sizeof(__itt_caller)
#if __TBB_FP_CONTEXT
                          - sizeof(internal::cpu_ctl_env_space)
#endif
                         ];

#if __TBB_FP_CONTEXT
    //! Space for platform-specific FPU settings.
    /** Must only be accessed inside TBB binaries, and never directly in user
        code or inline methods. */
    internal::cpu_ctl_env_space my_cpu_ctl_env;
#endif

    //! Specifies whether cancellation was requested for this task group.
    uintptr_t my_cancellation_requested;

    //! Version for run-time checks and behavioral traits of the context.
    /** Version occupies the low 16 bits, and traits (zero or more ORed enumerators
        from the traits_type enumeration) take the next 16 bits.
        Original (zeroth) version of the context did not support any traits. **/
    uintptr_t my_version_and_traits;

    //! Pointer to the container storing exception being propagated across this task group.
    exception_container_type *my_exception;

    //! Scheduler instance that registered this context in its thread specific list.
    internal::generic_scheduler *my_owner;

    //! Internal state (combination of state flags, currently only may_have_children).
    uintptr_t my_state;

#if __TBB_TASK_PRIORITY
    //! Priority level of the task group (in normalized representation)
    intptr_t my_priority;
#endif /* __TBB_TASK_PRIORITY */

    //! Trailing padding protecting accesses to frequently used members from false sharing
    /** \sa _leading_padding **/
    char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)
#if __TBB_TASK_PRIORITY
                            - sizeof(intptr_t)
#endif /* __TBB_TASK_PRIORITY */
                          ];

public:
    //! Default & binding constructor.
    /** By default a bound context is created. That is, this context will be bound
        (as a child) to the context of the task calling the task::allocate_root(this_context)
        method. Cancellation requests passed to the parent context are propagated
        to all the contexts bound to it. Similarly, priority changes are propagated
        from the parent context to its children.

        If task_group_context::isolated is used as the argument, then the tasks associated
        with this context will never be affected by events in any other context.

        Creating an isolated context involves much less overhead, but such contexts
        have limited utility. Normally, when an exception occurs in an algorithm
        that has nested ones running, it is desirable to have all the nested
        algorithms cancelled as well. Such behavior requires nested algorithms to use bound contexts.

        There is one place where using an isolated context is beneficial: the master
        thread. That is, if a particular algorithm is invoked directly from
        the master thread (not from a TBB task), supplying it with an explicitly
        created isolated context will result in faster algorithm startup.

        VERSIONING NOTE:
        Implementation(s) of task_group_context constructor(s) cannot be made
        entirely out-of-line because the run-time version must be set by the user
        code. This will become critically important for binary compatibility, if
        we ever have to change the size of the context object.

        Boosting the runtime version will also be necessary if new data fields are
        introduced in the currently unused padding areas and these fields are updated
        by inline methods. **/
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t t = default_traits )
        : my_kind(relation_with_parent)
        , my_version_and_traits(2 | t)
    {
        init();
    }

    // Do not introduce standalone unbind method since it will break state propagation assumptions
    __TBB_EXPORTED_METHOD ~task_group_context ();

    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the context's parent if it is set. **/
    void __TBB_EXPORTED_METHOD reset ();

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise.

        Note that canceling never fails. When false is returned, it just means that
        another thread (or this one) has already sent a cancellation request to this
        context or to one of its ancestors (if this context is bound). It is guaranteed
        that when this method is concurrently called on the same not-yet-cancelled
        context, true will be returned by one and only one invocation. **/
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    //! Returns true if the context has received a cancellation request.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

    //! Records the pending exception, and cancels the task group.
    /** May be called only from inside a catch-block. If the context is already
        cancelled, does nothing.
        The method brings the task group associated with this context exactly into
        the state it would be in, if one of its tasks threw the currently pending
        exception during its execution. In other words, it emulates the actions
        of the scheduler's dispatch loop exception handler. **/
    void __TBB_EXPORTED_METHOD register_pending_exception ();

#if __TBB_FP_CONTEXT
    //! Captures the current FPU control settings to the context.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the FPU control settings of the context's parent. **/
    void __TBB_EXPORTED_METHOD capture_fp_settings ();
#endif

#if __TBB_TASK_PRIORITY
    //! Changes priority of the task group
    void set_priority ( priority_t );

    //! Retrieves current priority of the current task group
    priority_t priority () const;
#endif /* __TBB_TASK_PRIORITY */

    //! Returns the context's traits.
    uintptr_t traits() const { return my_version_and_traits & traits_mask; }

protected:
    //! Out-of-line part of the constructor.
    /** Singled out to ensure backward binary compatibility of the future versions. **/
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;

    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);
    static const kind_type detached = kind_type(binding_completed+1);
    static const kind_type dying = kind_type(detached+1);

    //! Propagates any state change detected to *this, and as an optimisation possibly also upward along the heritage line.
    template <typename T>
    void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );

    //! Registers this context with the local scheduler and binds it to its parent context
    void bind_to ( internal::generic_scheduler *local_sched );

    //! Registers this context with the local scheduler
    void register_with ( internal::generic_scheduler *local_sched );

#if __TBB_FP_CONTEXT
    //! Copies FPU control setting from another context
    // TODO: Consider adding #else stub in order to omit #if sections in other code
    void copy_fp_settings( const task_group_context &src );
#endif /* __TBB_FP_CONTEXT */
}; // class task_group_context

#endif /* __TBB_TASK_GROUP_CONTEXT */
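
// A minimal usage sketch for the context above, assuming a hypothetical user task
// type MyRootTask derived from tbb::task (class task and its allocate_root(ctx)
// overload are declared further below):
//
//     tbb::task_group_context ctx;   // bound by default; pass ...::isolated to detach
//     tbb::task& r = *new( tbb::task::allocate_root(ctx) ) MyRootTask;
//     tbb::task::spawn_root_and_wait( r );
//
// Calling ctx.cancel_group_execution() from any thread cancels r and all of its
// descendants; a bound context additionally receives cancellation requests
// propagated from its parent group.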

//! Base class for user-defined tasks.
/** @ingroup task_scheduling */
class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {

    //! Set reference count
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    //! Decrement reference count and return its new value.
    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

protected:
    //! Default constructor.
    task() {prefix().extra_state=1;}

public:
    //! Destructor.
    virtual ~task() {}

    //! Should be overridden by derived classes.
    virtual task* execute() = 0;

    //! Enumeration of task states that the scheduler considers.
    enum state_type {
        //! task is running, and will be destroyed after method execute() completes.
        executing,
        //! task to be rescheduled.
        reexecute,
        //! task is in ready pool, or is going to be put there, or was just taken off.
        ready,
        //! task object is freshly allocated or recycled.
        allocated,
        //! task object is on free list, or is going to be put there, or was just taken off.
        freed,
        //! task to be recycled as continuation
        recycle
#if __TBB_RECYCLE_TO_ENQUEUE
        //! task to be scheduled for starvation-resistant execution
        ,to_enqueue
#endif
    };

    //------------------------------------------------------------------------
    // Allocating tasks
    //------------------------------------------------------------------------

    //! Returns proxy for overloaded new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }

#if __TBB_TASK_GROUP_CONTEXT
    //! Returns proxy for overloaded new that allocates a root task associated with user supplied context.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! Returns proxy for overloaded new that allocates a continuation task of *this.
    /** The continuation's parent becomes the parent of *this. */
    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }

    //! Returns proxy for overloaded new that allocates a child task of *this.
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }

    //! Define recommended static form via import from base class.
    using task_base::allocate_additional_child_of;
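
    // A sketch of how these allocation proxies combine with the overloaded
    // operator new forms defined at the bottom of this header (MyTask is a
    // hypothetical user task type):
    //
    //     tbb::task& root  = *new( tbb::task::allocate_root() ) MyTask;
    //     tbb::task& child = *new( root.allocate_child() ) MyTask;
    //     tbb::task& extra = *new( tbb::task::allocate_additional_child_of(root) ) MyTask;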

#if __TBB_DEPRECATED_TASK_INTERFACE
    //! Destroy a task.
    /** Usually, calling this method is unnecessary, because a task is
        implicitly deleted after its execute() method runs.  However,
        sometimes a task needs to be explicitly deallocated, such as
        when a root task is used as the parent in spawn_and_wait_for_all. */
    void __TBB_EXPORTED_METHOD destroy( task& t );
#else /* !__TBB_DEPRECATED_TASK_INTERFACE */
    //! Define recommended static form via import from base class.
    using task_base::destroy;
#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */

    //------------------------------------------------------------------------
    // Recycling of tasks
    //------------------------------------------------------------------------

    //! Change this to be a continuation of its former self.
    /** The caller must guarantee that the task's refcount does not become zero until
        after the method execute() returns.  Typically, this is done by having
        method execute() return a pointer to a child of the task.  If the guarantee
        cannot be made, use method recycle_as_safe_continuation instead.

        Because of the hazard, this method may be deprecated in the future. */
    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = allocated;
    }
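
    // A sketch of the scheduler-bypass idiom this enables, inside a hypothetical
    // execute() override; returning the child keeps the reference count from
    // reaching zero before execute() returns, as required above:
    //
    //     recycle_as_continuation();                  // *this becomes its own continuation
    //     ChildTask& c = *new( allocate_child() ) ChildTask();  // ChildTask: hypothetical
    //     set_ref_count( 1 );                         // exactly one outstanding child
    //     return &c;                                  // bypass: run the child next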

    //! The safe, recommended variant of recycle_as_continuation.
    /** For safety, it requires an additional increment of ref_count.
        With no descendants and a ref_count of 1, it has the semantics of recycle_to_reexecute. */
    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = recycle;
    }

    //! Change this to be a child of new_parent.
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        p.state = allocated;
        p.parent = &new_parent;
#if __TBB_TASK_GROUP_CONTEXT
        p.context = new_parent.prefix().context;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    }
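
    // A sketch of the usual continuation-passing pattern around this method
    // (FibTask and FibContinuation are hypothetical types): the running task
    // hands its parent over to a new continuation via allocate_continuation(),
    // which leaves its own parent NULL as the assertion above requires, then
    // re-enlists itself as one of the continuation's children:
    //
    //     FibContinuation& c = *new( allocate_continuation() ) FibContinuation();
    //     FibTask& a = *new( c.allocate_child() ) FibTask();
    //     recycle_as_child_of( c );
    //     c.set_ref_count( 2 );        // a plus the recycled *this
    //     c.spawn( a );
    //     return this;                 // *this runs again as c's second child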

    //! Schedule this for reexecution after current execute() returns.
    /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */
    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        prefix().state = reexecute;
    }

#if __TBB_RECYCLE_TO_ENQUEUE
    //! Schedule this to enqueue after descendant tasks complete.
    /** Apart from the enqueue/spawn difference, it has the semantics of recycle_as_safe_continuation. */
    void recycle_to_enqueue() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        prefix().state = to_enqueue;
    }
#endif /* __TBB_RECYCLE_TO_ENQUEUE */

    //------------------------------------------------------------------------
    // Spawning and blocking
    //------------------------------------------------------------------------

    //! Set reference count
    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    //! Atomically increment reference count.
    /** Has acquire semantics */
    void increment_ref_count() {
        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
    }

    //! Atomically adds to reference count and returns its new value.
    /** Has release-acquire semantics */
    int add_ref_count( int count ) {
        internal::call_itt_notify( internal::releasing, &prefix().ref_count );
        internal::reference_count k = count+__TBB_FetchAndAddW( &prefix().ref_count, count );
        __TBB_ASSERT( k>=0, "task's reference count underflowed" );
        if( k==0 )
            internal::call_itt_notify( internal::acquired, &prefix().ref_count );
        return int(k);
    }

    //! Atomically decrements reference count and returns its new value.
    /** Has release semantics. */
    int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        return int(internal_decrement_ref_count());
#else
        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    //! Define recommended static forms via import from base class.
    using task_base::spawn;

    //! Similar to spawn followed by wait_for_all, but more efficient.
    void spawn_and_wait_for_all( task& child ) {
        prefix().owner->wait_for_all( *this, &child );
    }

    //! Similar to spawn followed by wait_for_all, but more efficient.
    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );

    //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
    static void spawn_root_and_wait( task& root ) {
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }

    //! Spawn root tasks on list and wait for all of them to finish.
    /** If there are more tasks than worker threads, the tasks are spawned in
        order from front to back. */
    static void spawn_root_and_wait( task_list& root_list );

    //! Wait for reference count to become one, and set reference count to zero.
    /** Works on tasks while waiting. */
    void wait_for_all() {
        prefix().owner->wait_for_all( *this, NULL );
    }
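
    // A sketch of the blocking-style protocol, inside some task's execute()
    // (MyTask is a hypothetical child type).  The count passed to set_ref_count
    // is the number of children plus one extra for the wait itself:
    //
    //     MyTask& a = *new( allocate_child() ) MyTask();
    //     MyTask& b = *new( allocate_child() ) MyTask();
    //     set_ref_count( 3 );             // 2 children + 1 for the wait
    //     spawn( b );
    //     spawn_and_wait_for_all( a );    // executes a directly, then waits for both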

    //! Enqueue task for starvation-resistant execution.
#if __TBB_TASK_PRIORITY
    /** The task will be enqueued on the normal priority level disregarding the
        priority of its task group.

        The rationale for such semantics is that the priority of an enqueued task is
        statically fixed at the moment of its enqueuing, while task group priority
        is dynamic. Thus automatic priority inheritance would generally be subject
        to races, which may result in unexpected behavior.

        Use the enqueue() overload with an explicit priority value and the method
        task::group_priority() to implement such priority inheritance when it is really necessary. **/
#endif /* __TBB_TASK_PRIORITY */
    static void enqueue( task& t ) {
        t.prefix().owner->enqueue( t, NULL );
    }

#if __TBB_TASK_PRIORITY
    //! Enqueue task for starvation-resistant execution on the specified priority level.
    static void enqueue( task& t, priority_t p ) {
        __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value" );
        t.prefix().owner->enqueue( t, (void*)p );
    }
#endif /* __TBB_TASK_PRIORITY */
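
    // A sketch of fire-and-forget use (MyTask is hypothetical).  Unlike spawned
    // tasks, enqueued ones are processed in approximately first-come first-served
    // order, even if no thread ever blocks waiting on them:
    //
    //     tbb::task& t = *new( tbb::task::allocate_root() ) MyTask;
    //     tbb::task::enqueue( t );
    //     // with __TBB_TASK_PRIORITY: tbb::task::enqueue( t, tbb::priority_high );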

    //! The innermost task being executed or destroyed by the current thread at the moment.
    static task& __TBB_EXPORTED_FUNC self();

    //! task on whose behalf this task is working, or NULL if this is a root.
    task* parent() const {return prefix().parent;}

    //! Sets parent task pointer to the specified value.
    void set_parent(task* p) {
#if __TBB_TASK_GROUP_CONTEXT
        __TBB_ASSERT(!p || prefix().context == p->prefix().context, "The tasks must be in the same context");
#endif
        prefix().parent = p;
    }

#if __TBB_TASK_GROUP_CONTEXT
    //! This method is deprecated and will be removed in the future.
    /** Use method group() instead. **/
    task_group_context* context() {return prefix().context;}

    //! Pointer to the task group descriptor.
    task_group_context* group () { return prefix().context; }
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! True if task was stolen from the task pool of another thread.
    bool is_stolen_task() const {
        return (prefix().extra_state & 0x80)!=0;
    }

    //------------------------------------------------------------------------
    // Debugging
    //------------------------------------------------------------------------

    //! Current execution state
    state_type state() const {return state_type(prefix().state);}

    //! The internal reference count.
    int ref_count() const {
#if TBB_USE_ASSERT
        internal::reference_count ref_count_ = prefix().ref_count;
        __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
#endif
        return int(prefix().ref_count);
    }

    //! Obsolete, and only retained for the sake of backward compatibility. Always returns true.
    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;

    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------

    //! An id as used for specifying affinity.
    /** Guaranteed to be integral type.  Value of 0 means no affinity. */
    typedef internal::affinity_id affinity_id;

    //! Set affinity for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    //! Current affinity of this task
    affinity_id affinity() const {return prefix().affinity;}

    //! Invoked by scheduler to notify task that it ran on unexpected thread.
    /** Invoked before method execute() runs, if the task is stolen, or the task has
        affinity but will be executed on another thread.

        The default action does nothing. */
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );
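
    // A sketch of affinity replay (my_stored_id and successor are hypothetical):
    // record in note_affinity() where the task actually ran, then request the
    // same worker for a later task:
    //
    //     /*override*/ void note_affinity( affinity_id id ) { my_stored_id = id; }
    //     ...
    //     successor.set_affinity( my_stored_id );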

#if __TBB_TASK_GROUP_CONTEXT
    //! Moves this task from its current group into another one.
    /** Argument ctx specifies the new group.

        The primary purpose of this method is to associate a unique task group context
        with a task allocated for subsequent enqueuing. In contrast to spawned tasks,
        enqueued ones normally outlive the scope where they were created. This makes
        the traditional usage model, where task group contexts are allocated locally on
        the stack, inapplicable, and dynamic allocation of context objects is
        inefficient. Method change_group() allows making the task group context object
        a member of the task class, and then associating it with the containing task
        object in the latter's constructor. **/
    void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise. **/
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    //! Returns true if the context has received a cancellation request.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#else
    bool is_cancelled () const { return false; }
#endif /* __TBB_TASK_GROUP_CONTEXT */

#if __TBB_TASK_PRIORITY
    //! Changes priority of the task group this task belongs to.
    void set_group_priority ( priority_t p ) {  prefix().context->set_priority(p); }

    //! Retrieves current priority of the task group this task belongs to.
    priority_t group_priority () const { return prefix().context->priority(); }

#endif /* __TBB_TASK_PRIORITY */

private:
    friend class interface5::internal::task_base;
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_TASK_GROUP_CONTEXT
    friend class internal::allocate_root_with_context_proxy;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;

    //! Get reference to corresponding task_prefix.
    /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. **/
    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
}; // class task

//! task that does nothing.  Useful for synchronization.
/** @ingroup task_scheduling */
class empty_task: public task {
    /*override*/ task* execute() {
        return NULL;
    }
};
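
// A sketch of the common dummy-root idiom built on empty_task (MyTask is a
// hypothetical child type): the empty task serves only as a parent to wait on,
// and such a root must be destroyed explicitly afterwards:
//
//     tbb::empty_task& root = *new( tbb::task::allocate_root() ) tbb::empty_task;
//     root.set_ref_count( 2 );                        // 1 child + 1 for the wait
//     tbb::task& child = *new( root.allocate_child() ) MyTask;
//     root.spawn( child );
//     root.wait_for_all();
//     tbb::task::destroy( root );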

//! @cond INTERNAL
namespace internal {
    template<typename F>
    class function_task : public task {
#if __TBB_ALLOW_MUTABLE_FUNCTORS
        F my_func;
#else
        const F my_func;
#endif
        /*override*/ task* execute() {
            my_func();
            return NULL;
        }
    public:
        function_task( const F& f ) : my_func(f) {}
    };
} // namespace internal
//! @endcond

//! A list of children.
/** Used for spawning multiple tasks at once, e.g. via task::spawn(task_list&)
    @ingroup task_scheduling */
class task_list: internal::no_copy {
private:
    task* first;
    task** next_ptr;
    friend class task;
    friend class interface5::internal::task_base;
public:
    //! Construct empty list
    task_list() : first(NULL), next_ptr(&first) {}

    //! Destroys the list, but does not destroy the task objects.
    ~task_list() {}

    //! True if list is empty; false otherwise.
    bool empty() const {return !first;}

    //! Push task onto back of list.
    void push_back( task& task ) {
        task.prefix().next = NULL;
        *next_ptr = &task;
        next_ptr = &task.prefix().next;
    }
#if __TBB_TODO
    // TODO: add this method and implement&document the local execution ordering. See more in generic_scheduler::local_spawn
    //! Push task onto front of list (FIFO local execution, like individual spawning in the same order).
    void push_front( task& task ) {
        if( empty() ) {
            push_back(task);
        } else {
            task.prefix().next = first;
            first = &task;
        }
    }
#endif
    //! Pop the front task from the list.
    task& pop_front() {
        __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
        task* result = first;
        first = result->prefix().next;
        if( !first ) next_ptr = &first;
        return *result;
    }

    //! Clear the list
    void clear() {
        first=NULL;
        next_ptr=&first;
    }
};
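
// A sketch of batched spawning with a task_list, e.g. inside some task's
// execute() (MyTask is a hypothetical child type):
//
//     tbb::task_list list;
//     for( int i = 0; i < 4; ++i )
//         list.push_back( *new( allocate_child() ) MyTask(i) );
//     set_ref_count( 5 );                 // 4 children + 1 for the wait
//     spawn_and_wait_for_all( list );     // spawns the list and waits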

inline void interface5::internal::task_base::spawn( task& t ) {
    t.prefix().owner->spawn( t, t.prefix().next );
}

inline void interface5::internal::task_base::spawn( task_list& list ) {
    if( task* t = list.first ) {
        t->prefix().owner->spawn( *t, *list.next_ptr );
        list.clear();
    }
}

inline void task::spawn_root_and_wait( task_list& root_list ) {
    if( task* t = root_list.first ) {
        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
        root_list.clear();
    }
}

} // namespace tbb

inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
    return &tbb::internal::allocate_root_proxy::allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}

#if __TBB_TASK_GROUP_CONTEXT
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */

inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

#endif /* __TBB_task_H */