/usr/include/alsa/iatomic.h is in libasound2-dev 1.0.28-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file are shown below.
#ifndef __ALSA_IATOMIC_H
#define __ALSA_IATOMIC_H
#ifdef __i386__
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("": : :"memory")
#define IATOMIC_DEFINED 1
#endif
#ifdef __x86_64__
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence":::"memory")
#define IATOMIC_DEFINED 1
#endif
#ifdef __ia64__
/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():  Guarantees that all preceding stores to memory-
 *           like regions are visible before any subsequent
 *           stores and that all following stores will be
 *           visible only after all previous stores.
 *   rmb():  Like wmb(), but for reads.
 *   mb():   wmb()/rmb() combo, i.e., all previous memory
 *           accesses are visible before all subsequent
 *           accesses and vice versa.  This is also known as
 *           a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb() __asm__ __volatile__ ("mf" ::: "memory")
#define rmb() mb()
#define wmb() mb()
#define IATOMIC_DEFINED 1
#endif /* __ia64__ */
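
/*
 * A minimal sketch (illustration only, not part of the original
 * header) of the store-load case that only the full mb() fence
 * covers: in a Dekker-style handshake, each thread stores its own
 * flag and then loads the other's.  Without mb(), the CPU may hoist
 * the load above the store and both threads can believe they are
 * alone.  The flag variables and functions below are hypothetical.
 */
#if 0
static volatile int flag0, flag1;

static int thread0_may_enter(void)
{
        flag0 = 1;              /* announce intent */
        mb();                   /* order the store before the load below */
        return flag1 == 0;      /* with mb() on both sides, at most one
                                   thread sees the other's flag as 0 */
}

static int thread1_may_enter(void)
{
        flag1 = 1;
        mb();
        return flag0 == 0;
}
#endif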
#ifdef __alpha__
#define mb() \
__asm__ __volatile__("mb": : :"memory")
#define rmb() \
__asm__ __volatile__("mb": : :"memory")
#define wmb() \
__asm__ __volatile__("wmb": : :"memory")
#define IATOMIC_DEFINED 1
#endif /* __alpha__ */
#ifdef __powerpc__
/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("eieio" : : : "memory")
#define IATOMIC_DEFINED 1
#endif /* __powerpc__ */
#ifndef IATOMIC_DEFINED
/* Generic __sync_synchronize is available from gcc 4.1 */
#define mb() __sync_synchronize()
#define rmb() mb()
#define wmb() mb()
#define IATOMIC_DEFINED 1
#endif /* IATOMIC_DEFINED */
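
/*
 * A minimal sketch (illustration only, not part of the original
 * header) of the publish/consume pattern the wmb()/rmb() pair above
 * is meant for: the producer writes the payload, then the flag; the
 * consumer reads the flag, then the payload.  The names payload,
 * ready, publish and consume are hypothetical.
 */
#if 0
static int payload;
static volatile int ready;

static void publish(int v)
{
        payload = v;    /* write the data ... */
        wmb();          /* ... and make it visible before the flag */
        ready = 1;
}

static int consume(int *out)
{
        if (!ready)
                return 0;       /* nothing published yet */
        rmb();                  /* don't let the payload read float
                                   above the flag check */
        *out = payload;
        return 1;
}
#endif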
/*
 *  Atomic read/write
 *  Copyright (c) 2001 by Abramo Bagnara <abramo@alsa-project.org>
 */
/* Max number of times we must spin on a spin-lock calling sched_yield().
   After MAX_SPIN_COUNT iterations, we put the calling thread to sleep. */
#ifndef MAX_SPIN_COUNT
#define MAX_SPIN_COUNT 50
#endif
/* Duration of sleep (in nanoseconds) when we can't acquire a spin-lock
   after MAX_SPIN_COUNT iterations of sched_yield().
   This MUST BE > 2ms.
   (Otherwise the kernel does busy-waiting for real-time threads,
   giving other threads no chance to run.) */
#ifndef SPIN_SLEEP_DURATION
#define SPIN_SLEEP_DURATION 2000001
#endif
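
/*
 * A minimal sketch (illustration only; the library's real wait loop
 * is implemented out of line behind snd_atomic_read_wait(), declared
 * further down in this header) of how the two constants above are
 * meant to be used: call sched_yield() for up to MAX_SPIN_COUNT
 * iterations, then fall back to sleeping SPIN_SLEEP_DURATION
 * nanoseconds per iteration.  The spin_until() helper is hypothetical.
 */
#if 0
#include <sched.h>
#include <time.h>

static void spin_until(volatile const int *done)
{
        struct timespec ts = { 0, SPIN_SLEEP_DURATION };
        int i;
        for (i = 0; !*done; i++) {
                if (i < MAX_SPIN_COUNT)
                        sched_yield();          /* cheap retry first */
                else
                        nanosleep(&ts, NULL);   /* > 2 ms, so the kernel
                                                   really blocks us */
        }
}
#endif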
typedef struct {
        unsigned int begin, end;
} snd_atomic_write_t;

typedef struct {
        volatile const snd_atomic_write_t *write;
        unsigned int end;
} snd_atomic_read_t;

void snd_atomic_read_wait(snd_atomic_read_t *t);

static __inline__ void snd_atomic_write_init(snd_atomic_write_t *w)
{
        w->begin = 0;
        w->end = 0;
}

static __inline__ void snd_atomic_write_begin(snd_atomic_write_t *w)
{
        w->begin++;
        wmb();
}

static __inline__ void snd_atomic_write_end(snd_atomic_write_t *w)
{
        wmb();
        w->end++;
}

static __inline__ void snd_atomic_read_init(snd_atomic_read_t *r, snd_atomic_write_t *w)
{
        r->write = w;
}

static __inline__ void snd_atomic_read_begin(snd_atomic_read_t *r)
{
        r->end = r->write->end;
        rmb();
}

static __inline__ int snd_atomic_read_ok(snd_atomic_read_t *r)
{
        rmb();
        return r->end == r->write->begin;
}
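
/*
 * A minimal usage sketch of the reader/writer protocol above
 * (illustration only; the struct and function names are
 * hypothetical).  A writer brackets its plain stores with
 * snd_atomic_write_begin()/snd_atomic_write_end(), so begin != end
 * while an update is in flight.  A reader snapshots the data and
 * accepts it only when snd_atomic_read_ok() confirms that no write
 * overlapped; otherwise it waits for the writer and retries.
 */
#if 0
struct shared {
        snd_atomic_write_t sync;
        int a, b;
};

static void writer_set(struct shared *s, int a, int b)
{
        snd_atomic_write_begin(&s->sync);
        s->a = a;
        s->b = b;
        snd_atomic_write_end(&s->sync);
}

static void reader_get(struct shared *s, int *a, int *b)
{
        snd_atomic_read_t rd;
        snd_atomic_read_init(&rd, &s->sync);
        for (;;) {
                snd_atomic_read_begin(&rd);
                *a = s->a;
                *b = s->b;
                if (snd_atomic_read_ok(&rd))
                        break;                  /* snapshot is consistent */
                snd_atomic_read_wait(&rd);      /* a write raced with us */
        }
}
#endif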
#endif /* __ALSA_IATOMIC_H */