/usr/include/asm-x86_64/rtai_atomic.h is in librtai-dev 3.9.1-4.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
/*
* Copyright (C) 2007 Paolo Mantegazza <mantegazza@aero.polimi.it>.
* Copyright (C) 2003 Philippe Gerum <rpm@xenomai.org>.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _RTAI_ASM_X8664_ATOMIC_H
#define _RTAI_ASM_X8664_ATOMIC_H
#ifdef __KERNEL__
#include <linux/bitops.h>
#include <asm/atomic.h>
#include <asm/system.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
#define atomic_xchg(ptr, v) xchg(ptr, v)
#define atomic_cmpxchg(ptr, o, n) cmpxchg((unsigned long *)(ptr), o, n)
#endif
#else /* !__KERNEL__ */
#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif
/* Userland stand-in for the kernel's atomic_t: a plain int wrapped in a
 * struct so it cannot be mixed up with ordinary integers by accident. */
typedef struct { volatile int counter; } atomic_t;
//#define atomic_t long
/* Deliberately oversized dummy type: casting ptr to this in an "m"
 * constraint tells GCC the inline asm may touch a wide region of memory
 * at ptr, preventing illegal caching/reordering of that memory. */
struct __rtai_xchg_dummy { unsigned long a[100]; };
#define __rtai_xg(x) ((struct __rtai_xchg_dummy *)(x))
/*
 * Atomically exchange the 64-bit word at ptr with x; returns the value
 * previously stored at ptr.  xchg with a memory operand implicitly
 * asserts the bus lock, so LOCK_PREFIX here is redundant but harmless.
 * NOTE(review): xchgq operates on a full quadword -- ptr must point to
 * 8 bytes of naturally aligned storage, not the int-sized atomic_t
 * declared above; confirm against callers.
 */
static inline unsigned long atomic_xchg (volatile void *ptr, unsigned long x)
{
__asm__ __volatile__(LOCK_PREFIX "xchgq %0,%1"
:"=r" (x)
:"m" (*__rtai_xg(ptr)), "0" (x)
:"memory");
return x;
}
/*
 * Atomic 64-bit compare-and-swap: if the quadword at ptr equals o,
 * store n there; in all cases return the value that was at ptr before
 * the operation (equal to o iff the swap succeeded).  cmpxchg uses RAX
 * implicitly for the comparison value, hence the "=a"/"0" constraints
 * tying both prev and o to that register.
 */
static inline unsigned long atomic_cmpxchg (volatile void *ptr, unsigned long o, unsigned long n)
{
unsigned long prev;
__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
: "=a"(prev)
: "q"(n), "m" (*__rtai_xg(ptr)), "0" (o)
: "memory");
return prev;
}
/*
 * Atomically decrement v->counter and report whether it reached zero.
 * Returns nonzero (true) iff the new value is 0: sete captures the
 * ZF flag set by decl.  Mirrors the kernel API of the same name.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
LOCK_PREFIX "decl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
}
/*
 * Atomically increment v->counter.
 * NOTE(review): unlike the routines above, no "memory" clobber is
 * declared here (this matches the contemporaneous kernel asm), so the
 * compiler may reorder unrelated memory accesses around this call --
 * it is an atomic increment, not a full barrier.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
LOCK_PREFIX "incl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
/* Depollute the namespace a bit. */
#undef ADDR
#endif /* __KERNEL__ */
#endif /* !_RTAI_ASM_X8664_ATOMIC_H */