/usr/include/infiniband/arch.h is in libibverbs-dev 1.2.1-2.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
 */

#ifndef INFINIBAND_ARCH_H
#define INFINIBAND_ARCH_H

#include <stdint.h>
#include <endian.h>
#include <byteswap.h>

#if __BYTE_ORDER == __LITTLE_ENDIAN
static inline uint64_t htonll(uint64_t x) { return bswap_64(x); }
static inline uint64_t ntohll(uint64_t x) { return bswap_64(x); }
#elif __BYTE_ORDER == __BIG_ENDIAN
static inline uint64_t htonll(uint64_t x) { return x; }
static inline uint64_t ntohll(uint64_t x) { return x; }
#else
#error __BYTE_ORDER is neither __LITTLE_ENDIAN nor __BIG_ENDIAN
#endif
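
/*
 * Illustrative sketch, not part of the original header: 64-bit quantities
 * that cross the wire (for example a remote buffer address exchanged for an
 * RDMA operation) travel in network (big-endian) byte order, so callers
 * convert with htonll() before sending and ntohll() after receiving.
 * The helper names below are hypothetical.
 */
static inline uint64_t example_to_wire64(uint64_t host_val)
{
	return htonll(host_val);	/* host byte order -> network byte order */
}

static inline uint64_t example_from_wire64(uint64_t wire_val)
{
	return ntohll(wire_val);	/* network byte order -> host byte order */
}
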
/*
* Architecture-specific defines. Currently, an architecture is
* required to implement the following operations:
*
* mb() - memory barrier. No loads or stores may be reordered across
* this macro by either the compiler or the CPU.
* rmb() - read memory barrier. No loads may be reordered across this
* macro by either the compiler or the CPU.
* wmb() - write memory barrier. No stores may be reordered across
* this macro by either the compiler or the CPU.
* wc_wmb() - flush write combine buffers. No write-combined writes
* will be reordered across this macro by either the compiler or
* the CPU.
*/
#if defined(__i386__)
#define mb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
#define rmb() mb()
#define wmb() asm volatile("" ::: "memory")
#define wc_wmb() mb()
#elif defined(__x86_64__)
/*
* Only use lfence for mb() and rmb() because we don't care about
* ordering against non-temporal stores (for now at least).
*/
#define mb() asm volatile("lfence" ::: "memory")
#define rmb() mb()
#define wmb() asm volatile("" ::: "memory")
#define wc_wmb() asm volatile("sfence" ::: "memory")
#elif defined(__PPC64__)
#define mb() asm volatile("sync" ::: "memory")
#define rmb() asm volatile("lwsync" ::: "memory")
#define wmb() mb()
#define wc_wmb() wmb()
#elif defined(__ia64__)
#define mb() asm volatile("mf" ::: "memory")
#define rmb() mb()
#define wmb() mb()
#define wc_wmb() asm volatile("fwb" ::: "memory")
#elif defined(__PPC__)
#define mb() asm volatile("sync" ::: "memory")
#define rmb() mb()
#define wmb() mb()
#define wc_wmb() wmb()
#elif defined(__sparc_v9__)
#define mb() asm volatile("membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad" ::: "memory")
#define rmb() asm volatile("membar #LoadLoad" ::: "memory")
#define wmb() asm volatile("membar #StoreStore" ::: "memory")
#define wc_wmb() wmb()
#elif defined(__sparc__)
#define mb() asm volatile("" ::: "memory")
#define rmb() mb()
#define wmb() mb()
#define wc_wmb() wmb()
#elif defined(__s390x__)
#define mb() { asm volatile("" : : : "memory"); } /* for s390x */
#define rmb() mb() /* for s390x */
#define wmb() mb() /* for s390x */
#define wc_wmb() wmb() /* for s390x */
#elif defined(__aarch64__)
/* Perhaps dmb would be sufficient? Let us be conservative for now. */
#define mb() { asm volatile("dsb sy" ::: "memory"); }
#define rmb() { asm volatile("dsb ld" ::: "memory"); }
#define wmb() { asm volatile("dsb st" ::: "memory"); }
#define wc_wmb() wmb()
#elif defined(__mips__)
#define mb() { asm volatile("sync" ::: "memory"); }
#define rmb() { asm volatile("sync_rmb" ::: "memory"); }
#define wmb() { asm volatile("sync_wmb" ::: "memory"); }
#define wc_wmb() wmb()
#else
#warning No architecture specific memory barrier defines found!
#endif
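
/*
 * Illustrative sketch, not part of the original header: a typical use of
 * these barriers in a userspace provider is to write a work-queue entry to
 * memory, issue wmb() so those stores cannot be reordered past the doorbell
 * write, ring the doorbell register, and then issue wc_wmb() so a
 * write-combined doorbell mapping is actually flushed to the device. The
 * function and parameter names below are hypothetical.
 */
static inline void example_ring_doorbell(volatile uint32_t *doorbell,
					 uint32_t wqe_index)
{
	wmb();			/* order WQE stores before the doorbell store */
	*doorbell = wqe_index;	/* doorbell register, assumed write-combined */
	wc_wmb();		/* flush write-combining buffers to the device */
}
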
#endif /* INFINIBAND_ARCH_H */