/usr/include/xenomai/asm-x86/atomic_asm.h is in libxenomai-dev 2.6.2.1-2ubuntu2.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
/*
* Copyright (C) 2007 Philippe Gerum <rpm@xenomai.org>.
*
* Xenomai is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License,
* or (at your option) any later version.
*
* Xenomai is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Xenomai; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*/
#ifndef _XENO_ASM_X86_ATOMIC_ASM_H
#define _XENO_ASM_X86_ATOMIC_ASM_H
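/*
 * On SMP builds, read-modify-write instructions need the "lock" prefix
 * to be atomic with respect to other CPUs; uniprocessor builds omit it.
 */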
#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif
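/*
 * "rep; nop" encodes the PAUSE instruction, a hint to the CPU that this
 * is a spin-wait loop; the "memory" clobber keeps the compiler from
 * caching values across iterations.
 */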
static inline void cpu_relax(void)
{
	asm volatile("rep; nop" ::: "memory");
}
#ifdef __i386__
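/*
 * Casting the target to a large dummy struct makes the "m" constraint
 * cover the whole object, so gcc treats the operand strictly as a
 * memory reference rather than assuming a known, small size.
 */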
struct __xeno_xchg_dummy { unsigned long a[100]; };
#define __xeno_xg(x) ((struct __xeno_xchg_dummy *)(x))
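/*
 * xchg with a memory operand is implicitly locked on x86, so no
 * LOCK_PREFIX is needed; the previous value of *ptr is returned.
 */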
static inline unsigned long xnarch_atomic_xchg (volatile void *ptr,
						unsigned long x)
{
	__asm__ __volatile__("xchgl %0,%1"
			     :"=r" (x)
			     :"m" (*__xeno_xg(ptr)), "0" (x)
			     :"memory");
	return x;
}
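/*
 * Atomically compare *ptr (i.e. v->counter) with 'old' and, if equal,
 * store 'newval'. The value found at *ptr is returned in %eax, so the
 * operation succeeded iff the return value equals 'old'.
 */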
static inline unsigned long
xnarch_atomic_cmpxchg(xnarch_atomic_t *v, unsigned long old, unsigned long newval)
{
	volatile void *ptr = &v->counter;
	unsigned long prev;

	__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
			     : "=a"(prev)
			     : "r"(newval), "m"(*__xeno_xg(ptr)), "0"(old)
			     : "memory");
	return prev;
}
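/*
 * xnarch_memory_barrier() is a compiler barrier only; the read/write
 * barriers use a locked add to the top of the stack, which orders
 * memory accesses on processors that may predate the lfence/mfence/
 * sfence instructions.
 */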
#define xnarch_memory_barrier() __asm__ __volatile__("": : :"memory")
#define xnarch_read_memory_barrier() \
	__asm__ __volatile__ (LOCK_PREFIX "addl $0,0(%%esp)": : :"memory")
#define xnarch_write_memory_barrier() \
	__asm__ __volatile__ (LOCK_PREFIX "addl $0,0(%%esp)": : :"memory")
#else /* x86_64 */
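/* The 64-bit variants operate on full 64-bit words via xchgq/cmpxchgq. */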
#define __xeno_xg(x) ((volatile long *)(x))
static inline unsigned long xnarch_atomic_xchg (volatile void *ptr,
						unsigned long x)
{
	__asm__ __volatile__("xchgq %0,%1"
			     :"=r" (x)
			     :"m" (*__xeno_xg(ptr)), "0" (x)
			     :"memory");
	return x;
}
static inline unsigned long
xnarch_atomic_cmpxchg(xnarch_atomic_t *v, unsigned long old, unsigned long newval)
{
	volatile void *ptr = &v->counter;
	unsigned long prev;

	__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
			     : "=a"(prev)
			     : "r"(newval), "m"(*__xeno_xg(ptr)), "0"(old)
			     : "memory");
	return prev;
}
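/* x86_64 always provides the mfence/lfence/sfence instructions. */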
#define xnarch_memory_barrier() asm volatile("mfence":::"memory")
#define xnarch_read_memory_barrier() asm volatile("lfence":::"memory")
#define xnarch_write_memory_barrier() asm volatile("sfence":::"memory")
#endif /* x86_64 */
#endif /* _XENO_ASM_X86_ATOMIC_ASM_H */
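For reference, a typical compare-and-swap retry loop built on these primitives could look like the sketch below. It is an illustration only, not part of the packaged header; it assumes xnarch_atomic_t and its counter field are supplied by the Xenomai headers that include this file, and atomic_add_sketch is a hypothetical helper name.

static inline unsigned long atomic_add_sketch(xnarch_atomic_t *v,
					      unsigned long delta)
{
	unsigned long old, prev;

	for (;;) {
		old = v->counter;	/* snapshot the current value */
		prev = xnarch_atomic_cmpxchg(v, old, old + delta);
		if (prev == old)	/* nobody raced us: the store took effect */
			return old + delta;
		cpu_relax();		/* lost the race: pause, then retry */
	}
}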