This file is indexed.

/usr/include/tbb/compat/condition_variable is in libtbb-dev 4.4~20151115-0ubuntu3.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

/*
    Copyright 2005-2015 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
    you can redistribute it and/or modify it under the terms of the GNU General Public License
    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is
    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    See  the GNU General Public License for more details.   You should have received a copy of
    the  GNU General Public License along with Threading Building Blocks; if not, write to the
    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA

    As a special exception,  you may use this file  as part of a free software library without
    restriction.  Specifically,  if other files instantiate templates  or use macros or inline
    functions from this file, or you compile this file and link it with other files to produce
    an executable,  this file does not by itself cause the resulting executable to be covered
    by the GNU General Public License. This exception does not however invalidate any other
    reasons why the executable file might be covered by the GNU General Public License.
*/

#ifndef __TBB_condition_variable_H
#define __TBB_condition_variable_H

#if _WIN32||_WIN64
#include "../machine/windows_api.h"

namespace tbb { 
namespace interface5 {
namespace internal { 
struct condition_variable_using_event
{
    //! Event for blocking waiting threads.
    HANDLE event;
    //! Protects invariants involving n_waiters, release_count, and epoch.
    CRITICAL_SECTION mutex;
    //! Number of threads waiting on this condition variable
    int n_waiters;
    //! Number of threads remaining that should no longer wait on this condition variable.
    int release_count;
    //! To keep threads from waking up prematurely with earlier signals.
    unsigned epoch;
};
}}} // namespace tbb::interface5::internal

#ifndef CONDITION_VARIABLE_INIT
typedef void* CONDITION_VARIABLE;
typedef CONDITION_VARIABLE* PCONDITION_VARIABLE;
#endif

#else /* if not _WIN32||_WIN64 */
#include <errno.h> // some systems need it for ETIMEDOUT
#include <pthread.h>
#if __linux__
#include <ctime>
#else /* generic Unix */
#include <sys/time.h>
#endif
#endif /* _WIN32||_WIN64 */

#include "../tbb_stddef.h"
#include "../mutex.h"
#include "../tbb_thread.h"
#include "../tbb_exception.h"
#include "../tbb_profiling.h"

namespace tbb {

namespace interface5 {

// C++0x standard working draft 30.4.3
// Lock tag types
struct defer_lock_t { }; //! do not acquire ownership of the mutex
struct try_to_lock_t { }; //! try to acquire ownership of the mutex without blocking
struct adopt_lock_t { }; //! assume the calling thread has already obtained mutex ownership and manage it
const defer_lock_t defer_lock = {};
const try_to_lock_t try_to_lock = {};
const adopt_lock_t adopt_lock = {};

// C++0x standard working draft 30.4.3.1
//! lock_guard 
template<typename M>
class lock_guard : tbb::internal::no_copy {
public:
    //! mutex type
    typedef M mutex_type;

    //! Constructor
    /** precondition: If mutex_type is not a recursive mutex, the calling thread
        does not own the mutex m. */
    explicit lock_guard(mutex_type& m) : pm(m) {m.lock();}
    
    //! Adopt_lock constructor
    /** precondition: the calling thread owns the mutex m. */
    lock_guard(mutex_type& m, adopt_lock_t) : pm(m) {}

    //! Destructor
    ~lock_guard() { pm.unlock(); }
private:
    mutex_type& pm;
};

// C++0x standard working draft 30.4.3.2
//! unique_lock 
template<typename M>
class unique_lock : tbb::internal::no_copy {
    friend class condition_variable;
public:
    typedef M mutex_type;

    // 30.4.3.2.1 construct/copy/destroy
    // NB: Without constructors that take an r-value reference to a unique_lock, the following constructor is of little use.
    //! Constructor
    /** postcondition: pm==0 && owns==false */
    unique_lock() : pm(NULL), owns(false) {}

    //! Constructor
    /** precondition: if mutex_type is not a recursive mutex, the  calling thread
        does not own the mutex m.  If the precondition is not met, a deadlock occurs.
        postcondition: pm==&m and owns==true */
    explicit unique_lock(mutex_type& m) : pm(&m) {m.lock(); owns=true;}

    //! Defer_lock constructor
    /** postcondition: pm==&m and owns==false */
    unique_lock(mutex_type& m, defer_lock_t) : pm(&m), owns(false) {}

    //! Try_to_lock constructor
    /** precondition: if mutex_type is not a recursive mutex, the  calling thread
       does not own the mutex m.  If the precondition is not met, a deadlock occurs.
       postcondition: pm==&m and owns==res where res is the value returned by
       the call to m.try_lock(). */
    unique_lock(mutex_type& m, try_to_lock_t) : pm(&m) {owns = m.try_lock();}

    //! Adopt_lock constructor
    /** precondition: the calling thread owns the mutex. If it does not, mutex->unlock() would fail.
        postcondition: pm==&m and owns==true */
    unique_lock(mutex_type& m, adopt_lock_t) : pm(&m), owns(true) {}

    //! Timed unique_lock acquisition.
    /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that 
        it uses tbb::tick_count::interval_t to specify the time duration. */
    unique_lock(mutex_type& m, const tick_count::interval_t &i) : pm(&m) {owns = try_lock_for( i );}

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move constructor
    /** postconditions: pm == src_p.pm and owns == src_p.owns (where src_p is the state of src just prior to this
        construction), src.pm == 0 and src.owns == false. */
    unique_lock(unique_lock && src): pm(NULL), owns(false) {this->swap(src);}

    //! Move assignment
    /** effects: If owns calls pm->unlock().
        Postconditions: pm == src_p.pm and owns == src_p.owns (where src_p is the state of src just prior to this
        assignment), src.pm == 0 and src.owns == false. */
    unique_lock& operator=(unique_lock && src) {
        if (owns)
            this->unlock();
        pm = NULL;
        this->swap(src);
        return *this;
    }
#endif // __TBB_CPP11_RVALUE_REF_PRESENT

    //! Destructor
    ~unique_lock() { if( owns ) pm->unlock(); }

    // 30.4.3.2.2 locking
    //! Lock the mutex and own it.
    void lock() {
        if( pm ) {
            if( !owns ) {
                pm->lock();
                owns = true;
            } else 
                throw_exception_v4( tbb::internal::eid_possible_deadlock );
        } else 
            throw_exception_v4( tbb::internal::eid_operation_not_permitted );
        __TBB_ASSERT( owns, NULL );
    }

    //! Try to lock the mutex. 
    /** If successful, note that this lock owns it. Otherwise, set it false. */
    bool try_lock() {
        if( pm ) {
            if( !owns )
                owns = pm->try_lock();
            else
                throw_exception_v4( tbb::internal::eid_possible_deadlock );
        } else 
            throw_exception_v4( tbb::internal::eid_operation_not_permitted );
        return owns;
    }
 
    //! Try to lock the mutex. 
    bool try_lock_for( const tick_count::interval_t &i );

    //! Unlock the mutex
    /** And note that this lock no longer owns it. */
    void unlock() { 
        if( owns ) {
            pm->unlock();
            owns = false;
        } else
            throw_exception_v4( tbb::internal::eid_operation_not_permitted );
        __TBB_ASSERT( !owns, NULL );
    }

    // 30.4.3.2.3 modifiers
    //! Swap the two unique locks
    void swap(unique_lock& u) {
        mutex_type* t_pm = u.pm;    u.pm   = pm;    pm   = t_pm;
        bool t_owns      = u.owns;  u.owns = owns;  owns = t_owns;
    }

    //! Release control over the mutex.
    mutex_type* release() {
        mutex_type* o_pm = pm; 
        pm = NULL; 
        owns = false; 
        return o_pm; 
    }

    // 30.4.3.2.4 observers
    //! Does this lock own the mutex?
    bool owns_lock() const { return owns; }

    // TODO: Un-comment 'explicit' when the last non-C++0x compiler support is dropped
    //! Does this lock own the mutex?
    /*explicit*/ operator bool() const { return owns; }

    //! Return the mutex that this lock currently has.
    mutex_type* mutex() const { return pm; }

private:
    mutex_type* pm;
    bool owns;
};

template<typename M>
bool unique_lock<M>::try_lock_for( const tick_count::interval_t &i)
{ 
    const int unique_lock_tick = 100; /* microseconds; 0.1 milliseconds */
    // the smallest wait-time is 0.1 milliseconds.
    bool res = pm->try_lock();
    int duration_in_micro; 
    if( !res && (duration_in_micro=int(i.seconds()*1e6))>unique_lock_tick ) {
        tick_count::interval_t i_100( double(unique_lock_tick)/1e6 /* seconds */); // 100 microseconds = 0.1*10E-3
        do {
            this_tbb_thread::sleep(i_100); // sleep for 100 micro seconds
            duration_in_micro -= unique_lock_tick;
            res = pm->try_lock();
        } while( !res && duration_in_micro>unique_lock_tick );
    }
    return (owns=res);
}

//! Swap the two unique locks that have the mutexes of same type 
template<typename M>
void swap(unique_lock<M>& x, unique_lock<M>& y) { x.swap( y ); }

namespace internal {

#if _WIN32||_WIN64
union condvar_impl_t {
    condition_variable_using_event cv_event;
    CONDITION_VARIABLE             cv_native;
};
void __TBB_EXPORTED_FUNC internal_initialize_condition_variable( condvar_impl_t& cv );
void __TBB_EXPORTED_FUNC internal_destroy_condition_variable(    condvar_impl_t& cv );
void __TBB_EXPORTED_FUNC internal_condition_variable_notify_one( condvar_impl_t& cv );
void __TBB_EXPORTED_FUNC internal_condition_variable_notify_all( condvar_impl_t& cv );
bool __TBB_EXPORTED_FUNC internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i = NULL );

#else /* if !(_WIN32||_WIN64), i.e., POSIX threads */
typedef pthread_cond_t condvar_impl_t;
#endif

} // namespace internal

//! cv_status
/** C++0x standard working draft 30.5 */
enum cv_status { no_timeout, timeout }; 

//! condition variable
/** C++0x standard working draft 30.5.1 
    @ingroup synchronization */
class condition_variable : tbb::internal::no_copy {
public:
    //! Constructor
    condition_variable() { 
#if _WIN32||_WIN64
        internal_initialize_condition_variable( my_cv ); 
#else
        pthread_cond_init( &my_cv, NULL );
#endif
    }

    //! Destructor
    ~condition_variable() { 
        //precondition: There shall be no thread blocked on *this.
#if _WIN32||_WIN64
        internal_destroy_condition_variable( my_cv );
#else
        pthread_cond_destroy( &my_cv );
#endif
    }

    //! Notify one thread and wake it up
    void notify_one() { 
#if _WIN32||_WIN64
        internal_condition_variable_notify_one( my_cv ); 
#else
        pthread_cond_signal( &my_cv );
#endif
    }

    //! Notify all threads 
    void notify_all() { 
#if _WIN32||_WIN64
        internal_condition_variable_notify_all( my_cv ); 
#else
        pthread_cond_broadcast( &my_cv );
#endif
    }

    //! Release the mutex associated with the lock and wait on this condition variable
    void wait(unique_lock<mutex>& lock);

    //! Wait on this condition variable while pred is false
    template <class Predicate>
    void wait(unique_lock<mutex>& lock, Predicate pred) {
        while( !pred() )
            wait( lock );
    }

    //! Timed version of wait()
    cv_status wait_for(unique_lock<mutex>& lock, const tick_count::interval_t &i );

    //! Timed version of the predicated wait
    /** The loop terminates when pred() returns true or when the time duration specified by rel_time (i) has elapsed. */
    template<typename Predicate>
    bool wait_for(unique_lock<mutex>& lock, const tick_count::interval_t &i, Predicate pred)
    {
        while( !pred() ) {
            cv_status st = wait_for( lock, i );
            if( st==timeout )
                return pred();
        }
        return true;
    }

    // C++0x standard working draft. 30.2.3
    typedef internal::condvar_impl_t* native_handle_type;

    native_handle_type native_handle() { return (native_handle_type) &my_cv; }

private:
    internal::condvar_impl_t my_cv;
};


#if _WIN32||_WIN64
inline void condition_variable::wait( unique_lock<mutex>& lock )
{
    __TBB_ASSERT( lock.owns, NULL );
    lock.owns = false;
    if( !internal_condition_variable_wait( my_cv, lock.mutex() ) ) {
        int ec = GetLastError();
        // on Windows 7, SleepConditionVariableCS() may return ERROR_TIMEOUT while the doc says it returns WAIT_TIMEOUT
        __TBB_ASSERT_EX( ec!=WAIT_TIMEOUT&&ec!=ERROR_TIMEOUT, NULL );
        lock.owns = true;
        throw_exception_v4( tbb::internal::eid_condvar_wait_failed );
    }
    lock.owns = true;
}

inline cv_status condition_variable::wait_for( unique_lock<mutex>& lock, const tick_count::interval_t& i )
{
    cv_status rc = no_timeout;
    __TBB_ASSERT( lock.owns, NULL );
    lock.owns = false;
    // condvar_wait could be SleepConditionVariableCS (or SleepConditionVariableSRW) or our own pre-vista cond_var_wait()
    if( !internal_condition_variable_wait( my_cv, lock.mutex(), &i ) ) {
        int ec = GetLastError();
        if( ec==WAIT_TIMEOUT || ec==ERROR_TIMEOUT )
            rc = timeout;
        else {
            lock.owns = true;
            throw_exception_v4( tbb::internal::eid_condvar_wait_failed );
        }
    }
    lock.owns = true;
    return rc;
}

#else /* !(_WIN32||_WIN64) */
inline void condition_variable::wait( unique_lock<mutex>& lock )
{
    __TBB_ASSERT( lock.owns, NULL );
    lock.owns = false;
    if( pthread_cond_wait( &my_cv, lock.mutex()->native_handle() ) ) {
        lock.owns = true;
        throw_exception_v4( tbb::internal::eid_condvar_wait_failed );
    }
    // upon successful return, the mutex has been locked and is owned by the calling thread.
    lock.owns = true;
}

inline cv_status condition_variable::wait_for( unique_lock<mutex>& lock, const tick_count::interval_t& i )
{
#if __linux__
    struct timespec req;
    double sec = i.seconds();
    clock_gettime( CLOCK_REALTIME, &req );
    req.tv_sec  += static_cast<long>(sec);
    req.tv_nsec += static_cast<long>( (sec - static_cast<long>(sec))*1e9 );
#else /* generic Unix */
    struct timeval tv;
    struct timespec req;
    double sec = i.seconds();
    int status = gettimeofday(&tv, NULL);
    __TBB_ASSERT_EX( status==0, "gettimeofday failed" );
    req.tv_sec  = tv.tv_sec + static_cast<long>(sec);
    req.tv_nsec = tv.tv_usec*1000 + static_cast<long>( (sec - static_cast<long>(sec))*1e9 );
#endif /*(choice of OS) */
    if( req.tv_nsec>=1e9 ) {
        req.tv_sec  += 1;
        req.tv_nsec -= static_cast<long int>(1e9);
    }
    __TBB_ASSERT( 0<=req.tv_nsec && req.tv_nsec<1e9, NULL );

    int ec;
    cv_status rc = no_timeout;
    __TBB_ASSERT( lock.owns, NULL );
    lock.owns = false;
    if( ( ec=pthread_cond_timedwait( &my_cv, lock.mutex()->native_handle(), &req ) ) ) {
        if( ec==ETIMEDOUT )
            rc = timeout;
        else {
            __TBB_ASSERT( lock.try_lock()==false, NULL );
            lock.owns = true;
            throw_exception_v4( tbb::internal::eid_condvar_wait_failed );
        }
    }
    lock.owns = true;
    return rc;
}
#endif /* !(_WIN32||_WIN64) */

} // namespace interface5

__TBB_DEFINE_PROFILING_SET_NAME(interface5::condition_variable)

} // namespace tbb 

#if TBB_IMPLEMENT_CPP0X

namespace std {

using tbb::interface5::defer_lock_t;
using tbb::interface5::try_to_lock_t;
using tbb::interface5::adopt_lock_t;
using tbb::interface5::defer_lock;
using tbb::interface5::try_to_lock;
using tbb::interface5::adopt_lock;
using tbb::interface5::lock_guard;
using tbb::interface5::unique_lock;
using tbb::interface5::swap;   /* this is for void std::swap(unique_lock<M>&,unique_lock<M>&) */
using tbb::interface5::condition_variable;
using tbb::interface5::cv_status;
using tbb::interface5::timeout;
using tbb::interface5::no_timeout;

} // namespace std 

#endif /* TBB_IMPLEMENT_CPP0X */

#endif /* __TBB_condition_variable_H */
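
For reference, a minimal usage sketch of the classes declared in this header (not part of the packaged file): a tbb::interface5::unique_lock over tbb::mutex paired with the predicated wait of tbb::interface5::condition_variable. The mtx, cv, ready, is_ready, and producer names are illustrative only, assumed for this example.

#include "tbb/mutex.h"
#include "tbb/tbb_thread.h"
#include "tbb/compat/condition_variable"

static tbb::mutex mtx;
static tbb::interface5::condition_variable cv;
static bool ready = false;

// Hypothetical predicate: the condition the waiting thread checks.
static bool is_ready() { return ready; }

static void producer() {
    {
        tbb::interface5::unique_lock<tbb::mutex> lock(mtx);
        ready = true;
    }                 // release the mutex before signalling
    cv.notify_one();  // wake the waiting thread
}

int main() {
    tbb::tbb_thread t(producer);
    {
        tbb::interface5::unique_lock<tbb::mutex> lock(mtx);
        // Predicated wait: re-checks is_ready() after every wakeup,
        // matching the wait(lock, pred) overload defined in the header.
        cv.wait(lock, is_ready);
    }
    t.join();
    return 0;
}

A timed variant would call cv.wait_for(lock, tbb::tick_count::interval_t(0.5)) and compare the returned cv_status against tbb::interface5::timeout; with TBB_IMPLEMENT_CPP0X defined, the same names are also injected into namespace std as shown at the end of the header.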