/usr/include/Yap/locks_x86.h is in yap 6.2.2-6.

This file is owned by root:root, with mode 0o644.

The actual contents of the file are shown below.

/************************************************************************
**                                                                     **
**                   The YapTab/YapOr/OPTYap systems                   **
**                                                                     **
** YapTab extends the Yap Prolog engine to support sequential tabling  **
** YapOr extends the Yap Prolog engine to support or-parallelism       **
** OPTYap extends the Yap Prolog engine to support or-parallel tabling **
**                                                                     **
**                                                                     **
**      YAP Prolog was developed at University of Porto, Portugal      **
**                                                                     **
************************************************************************/

/************************************************************************
**                        Atomic locks for X86                         **
************************************************************************/

typedef struct {
    volatile unsigned int lock;
} spinlock_t;

static inline int
spin_trylock(spinlock_t *lock)
{
    /* Atomically swap 1 into the lock byte; xchg with a memory
     * operand is implicitly locked on x86.  The lock's previous
     * value comes back in tmp: 0 means it was free and is now ours. */
    char tmp = 1;
    __asm__ __volatile__(
			 "xchgb %b0, %1"
			 : "=q"(tmp), "=m"(lock->lock)
			 : "0"(tmp) : "memory");
    return tmp == 0;
}

static inline void
spin_unlock(spinlock_t *lock)
{
    /* To unlock we move 0 to the lock.
     * On i386 this needs to be a locked operation
     * to avoid Pentium Pro errata 66 and 92.
     */
#if defined(__x86_64__)
    /* On x86-64 a compiler barrier plus a plain byte store suffices;
     * spin_trylock only ever sets the low byte of the lock word. */
    __asm__ __volatile__("" : : : "memory");
    *(unsigned char*)&lock->lock = 0;
#else
    char tmp = 0;
    __asm__ __volatile__(
			 "xchgb %b0, %1"
			 : "=q"(tmp), "=m"(lock->lock)
			 : "0"(tmp) : "memory");
#endif
}
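
A hypothetical direct use of the pair above, for callers that would rather do other work than spin; cache_lock and try_refresh_cache are illustrative names, not part of this header:

/* Hypothetical non-blocking caller of spin_trylock. */
static spinlock_t cache_lock;    /* zero-initialized: unlocked */

int try_refresh_cache(void)
{
    if (!spin_trylock(&cache_lock))
        return 0;                /* someone else holds it; skip */
    /* ... refresh the cache ... */
    spin_unlock(&cache_lock);
    return 1;
}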

#define TRY_LOCK(LOCK_VAR)  spin_trylock((spinlock_t *)(LOCK_VAR))

#define INIT_LOCK(LOCK_VAR)    ((LOCK_VAR) = 0)
#define LOCK(LOCK_VAR)         do {                                     \
                                 if (TRY_LOCK(&(LOCK_VAR))) break;      \
                                 while (IS_LOCKED(LOCK_VAR)) continue;  \
                               } while (1)
#define IS_LOCKED(LOCK_VAR)    ((LOCK_VAR) != 0)
#define IS_UNLOCKED(LOCK_VAR)  ((LOCK_VAR) == 0)
#define UNLOCK(LOCK_VAR)       spin_unlock((spinlock_t *)&(LOCK_VAR))
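
A minimal usage sketch of the blocking macros (the lock word is a plain integer; my_lock, shared_counter and bump_counter are illustrative names, not part of this header):

/* Hypothetical caller: guard a shared counter with the spinlock macros. */
static int my_lock;              /* INIT_LOCK(my_lock) before first use */
static long shared_counter;

void bump_counter(void)
{
    LOCK(my_lock);               /* try xchg once, then poll until free */
    shared_counter++;            /* critical section */
    UNLOCK(my_lock);             /* locked xchg on i386, plain store on x86-64 */
}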

/* The code that follows has been adapted from the Erlang sources. */

typedef struct {
    volatile int lock;
} rwlock_t;

#define RWLOCK_OFFSET (1<<24)
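
The lock word is a single signed counter: each reader adds 1 and a writer subtracts RWLOCK_OFFSET, so the word is negative exactly when a writer is involved. A sketch of the encoding (illustrative values, not part of the header):

/* Illustrative states of the rwlock word:
 *    0                   unlocked
 *    3                   three readers, no writer
 *   -RWLOCK_OFFSET       write lock held, no readers
 *   -RWLOCK_OFFSET + 2   a write attempt overlapped two readers;
 *                        the word is negative, so new readers back
 *                        off while the writer undoes its offset
 *                        and retries
 */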

static inline void
init_rwlock(rwlock_t *lock)
{
    lock->lock = 0;
}

static inline void
read_unlock(rwlock_t *lock)
{
    /* Atomically drop this reader's +1 from the lock word. */
    __asm__ __volatile__(
			 "lock; decl %0"
			 : "=m"(lock->lock)
			 : "m"(lock->lock)
			 );
}

static inline int
read_trylock(rwlock_t *lock)
{
    int tmp;

    tmp = 1;
    __asm__ __volatile__(
			 "lock; xaddl %0, %1"
			 : "=r"(tmp)
			 : "m"(lock->lock), "0"(tmp));
    /* tmp is now the lock's previous value: non-negative means no
     * writer was present, so our increment stands and we hold a
     * read lock */
    if (__builtin_expect(tmp >= 0, 1))
      return 1;
    /* a writer is present: undo our increment and report failure */
    read_unlock(lock);
    return 0;
}

static inline int
read_is_locked(rwlock_t *lock)
{
    /* A negative lock word means a writer is (or was just) present. */
    return lock->lock < 0;
}

static inline void
read_lock(rwlock_t *lock)
{
    for(;;) {
      if (__builtin_expect(read_trylock(lock) != 0, 1))
	break;
      /* "rep;nop" encodes the x86 PAUSE hint: spin politely on the
       * lock word until the writer is gone, then retry */
      do {
	__asm__ __volatile__("rep;nop" : "=m"(lock->lock) : : "memory");
      } while (read_is_locked(lock));
    }
}

static inline void
write_unlock(rwlock_t *lock)
{
    /* Release the write lock by adding the writer's offset back. */
    __asm__ __volatile__(
			 "lock; addl %2,%0"
			 : "=m"(lock->lock)
			 : "m"(lock->lock), "i"(RWLOCK_OFFSET));
}

static inline int
write_trylock(rwlock_t *lock)
{
    int tmp;

    tmp = -RWLOCK_OFFSET;
    __asm__ __volatile__(
			 "lock; xaddl %0, %1"
			 : "=r"(tmp)
			 : "m"(lock->lock), "0"(tmp));
    /* tmp is now the lock's previous value: zero means the lock was
     * completely free (no readers, no writer), so we now hold it */
    if (__builtin_expect(tmp == 0, 1))
      return 1;
    /* contended: add the offset back and report failure */
    write_unlock(lock);
    return 0;
}

static inline int
write_is_locked(rwlock_t *lock)
{
    /* Any non-zero word: readers, a writer, or both are present. */
    return lock->lock != 0;
}

static inline void
write_lock(rwlock_t *lock)
{
    for(;;) {
      if (__builtin_expect(write_trylock(lock) != 0, 1))
	break;
      /* PAUSE hint, as in read_lock: spin until the word clears */
      do {
	__asm__ __volatile__("rep;nop" : "=m"(lock->lock) : : "memory");
      } while (write_is_locked(lock));
    }
}

#define INIT_RWLOCK(lock) init_rwlock(&(lock))
#define READ_LOCK(lock) read_lock(&(lock))
#define READ_UNLOCK(lock) read_unlock(&(lock))
#define WRITE_LOCK(lock) write_lock(&(lock))
#define WRITE_UNLOCK(lock) write_unlock(&(lock))
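
A minimal usage sketch of the rwlock macros (hypothetical caller; table_lock, lookup and update are illustrative names, not part of this header):

static rwlock_t table_lock;      /* INIT_RWLOCK(table_lock) at startup */

long lookup(long *table, int i)
{
    long v;
    READ_LOCK(table_lock);       /* shared: many readers may hold it */
    v = table[i];
    READ_UNLOCK(table_lock);
    return v;
}

void update(long *table, int i, long v)
{
    WRITE_LOCK(table_lock);      /* exclusive: no readers, no writers */
    table[i] = v;
    WRITE_UNLOCK(table_lock);
}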