/usr/include/xenomai/nucleus/heap.h is in libxenomai-dev 2.6.3-2.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
/*
* @note Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
*
* Xenomai is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License,
* or (at your option) any later version.
*
* Xenomai is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Xenomai; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*
* \ingroup heap
*/
#ifndef _XENO_NUCLEUS_HEAP_H
#define _XENO_NUCLEUS_HEAP_H
#include <nucleus/queue.h>
/*
* CONSTRAINTS:
*
* Minimum page size is 2 ** XNHEAP_MINLOG2 (must be large enough to
* hold a pointer).
*
* Maximum page size is 2 ** XNHEAP_MAXLOG2.
*
* Minimum block size equals the minimum page size.
*
* Requested block size smaller than the minimum block size is
* rounded to the minimum block size.
*
* Requested block size larger than 2 times the page size is rounded
* to the next page boundary and obtained from the free page
* list. So we need a bucket for each power of two between
* XNHEAP_MINLOG2 and XNHEAP_MAXLOG2 inclusive, plus one to honor
* requests ranging from the maximum page size to twice this size.
*/
#if defined(__KERNEL__) || defined(__XENO_SIM__)
#define XNHEAP_PAGE_SIZE	512 /* A reasonable value for the xnheap page size */
#define XNHEAP_PAGE_MASK	(~(XNHEAP_PAGE_SIZE-1))
#define XNHEAP_PAGE_ALIGN(addr)	(((addr)+XNHEAP_PAGE_SIZE-1)&XNHEAP_PAGE_MASK)

#define XNHEAP_MINLOG2    3
#define XNHEAP_MAXLOG2    22	/* Must hold pagemap::bcount objects */
#define XNHEAP_MINALLOCSZ (1 << XNHEAP_MINLOG2)
#define XNHEAP_MINALIGNSZ (1 << 4) /* i.e. 16 bytes */
#define XNHEAP_NBUCKETS   (XNHEAP_MAXLOG2 - XNHEAP_MINLOG2 + 2)
/*
 * Use an unsigned long literal here: (1 << 31) overflows a 32-bit
 * signed int, which is undefined behavior; on common ABIs it yields
 * INT_MIN, which sign-extends to a huge value when compared against
 * u_long sizes on LP64, defeating the maximum-extent-size check.
 */
#define XNHEAP_MAXEXTSZ   (1UL << 31) /* i.e. 2Gb */

/* Page states tracked in xnpagemap::type. */
#define XNHEAP_PFREE   0
#define XNHEAP_PCONT   1
#define XNHEAP_PLIST   2

#define XNHEAP_GFP_NONCACHED (1 << __GFP_BITS_SHIFT)
/* Per-page descriptor: records how each page of an extent is used. */
struct xnpagemap {
	unsigned int type : 8;	  /* PFREE, PCONT, PLIST or log2 of block size */
	unsigned int bcount : 24; /* Number of active blocks. */
};
/*
 * Extent descriptor: a heap is backed by one or more extents, each
 * carrying a contiguous page array described by the fields below.
 */
typedef struct xnextent {

	xnholder_t link;	/* Link into the heap's extent queue */

#define link2extent(ln) container_of(ln, xnextent_t, link)

	caddr_t membase,	/* Base address of the page array */
		memlim,		/* Memory limit of page array */
		freelist;	/* Head of the free page list */

	struct xnpagemap pagemap[1];	/* Beginning of page map; the
					   struct is presumably
					   over-allocated so the map
					   extends past this single
					   element — do not convert to a
					   flexible array member, since
					   the overhead math below uses
					   sizeof(xnextent_t) as-is. */
} xnextent_t;
/* Heap descriptor. */
typedef struct xnheap {

	xnholder_t link;	/* Link into the global heap queue */

#define link2heap(ln) container_of(ln, xnheap_t, link)

	u_long extentsize,	/* Size of each extent, in bytes */
	       pagesize,	/* Allocation page size */
	       pageshift,	/* presumably log2(pagesize) — confirm in heap.c */
	       hdrsize,		/* Size of the extent header area */
	       npages,		/* Number of pages per extent */
	       ubytes,		/* Bytes currently allocated (see xnheap_used_mem) */
	       maxcont;		/* Max contiguous space per extent; usable
				   memory is maxcont * #extents (see
				   xnheap_usable_mem) */

	xnqueue_t extents;	/* Queue of xnextent_t backing this heap */

	DECLARE_XNLOCK(lock);

	/* One free list per block-size class (buckets by power of 2). */
	struct xnbucket {
		caddr_t freelist;
		int fcount;
	} buckets[XNHEAP_NBUCKETS];

	xnholder_t *idleq[XNARCH_NR_CPUS];	/* Per-CPU deferred-release queues */

	xnarch_heapcb_t archdep;	/* Arch-specific control block */

	XNARCH_DECL_DISPLAY_CONTEXT();

	xnholder_t stat_link;	/* Link in heapq */

	char label[XNOBJECT_NAME_LEN+16];	/* Display name (xnheap_set_label) */

} xnheap_t;
/* The kernel heap, backing xnmalloc()/xnfree(). */
extern xnheap_t kheap;

#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0
/* Dedicated pool, presumably for kernel thread stacks — see heap.c. */
extern xnheap_t kstacks;
#endif
/* Heap geometry and accounting accessors. */
#define xnheap_extentsize(heap)		((heap)->extentsize)
#define xnheap_page_size(heap)		((heap)->pagesize)
#define xnheap_page_count(heap)		((heap)->npages)
#define xnheap_usable_mem(heap)		((heap)->maxcont * countq(&(heap)->extents))
#define xnheap_used_mem(heap)		((heap)->ubytes)
#define xnheap_max_contiguous(heap)	((heap)->maxcont)
/*
 * Round @size up to the next multiple of @al.
 * @al must be a power of 2 for the mask arithmetic to be valid.
 */
static inline size_t xnheap_align(size_t size, size_t al)
{
	size_t mask = al - 1;

	return (size + mask) & ~mask;
}
/*
 * Overhead of a heap whose header lives outside the managed space:
 * one extent descriptor plus one page map entry per page of @hsize,
 * the total rounded up to a page boundary.
 */
static inline size_t xnheap_external_overhead(size_t hsize, size_t psize)
{
	size_t npages = (hsize + psize - 1) / psize;
	size_t hdrsize = sizeof(xnextent_t) + npages * sizeof(struct xnpagemap);

	return xnheap_align(hdrsize, psize);
}
/*
 * Overhead when the header is carved out of the heap space itself.
 * With h = hsize, p = psize, m = sizeof(page map entry) and
 * e = sizeof(extent header), solve for the overhead o:
 *   o = (h - o) * m / p + e
 *   o * p = (h - o) * m + e * p
 *   o * (p + m) = h * m + e * p
 *   o = (h * m + e * p) / (p + m)
 */
static inline size_t xnheap_internal_overhead(size_t hsize, size_t psize)
{
	size_t m = sizeof(struct xnpagemap);
	size_t num = sizeof(xnextent_t) * psize + m * hsize;

	return xnheap_align(num / (psize + m), psize);
}
/* Convenience wrappers operating on the kernel heap (kheap). */
#define xnmalloc(size)	xnheap_alloc(&kheap,size)
#define xnfree(ptr)	xnheap_free(&kheap,ptr)
#define xnfreesync()	xnheap_finalize_free(&kheap)
/*
 * Release @ptr safely: when @thread is the current thread, defer the
 * release through the per-CPU idle queue using @ln as the link —
 * presumably so a thread never releases memory it may still be
 * running on (e.g. its own control block); otherwise free at once.
 */
#define xnfreesafe(thread, ptr, ln)				\
	do {							\
		if (xnpod_current_p(thread))			\
			xnheap_schedule_free(&kheap, ptr, ln);	\
		else						\
			xnheap_free(&kheap,ptr);		\
	} while(0)
/*
 * Account for the minimum heap size (i.e. 2 * page size) plus
 * overhead so that the actual heap space is large enough to match
 * the requested size. Using a small page size for large
 * single-block heaps might reserve a lot of useless page map
 * memory, but this should never get pathological anyway, since we
 * only consume 4 bytes per page.
 */
static inline size_t xnheap_rounded_size(size_t hsize, size_t psize)
{
	size_t minsize = 2 * psize;

	if (hsize < minsize)
		hsize = minsize;

	return xnheap_align(hsize + xnheap_external_overhead(hsize, psize),
			    psize);
}
#ifdef __cplusplus
extern "C" {
#endif
/* Private interface. */
#ifdef __KERNEL__

int xnheap_mount(void);

void xnheap_umount(void);

void xnheap_init_proc(void);

void xnheap_cleanup_proc(void);

/* Create a heap whose storage can be mapped to user-space. */
int xnheap_init_mapped(xnheap_t *heap,
		       u_long heapsize,
		       int memflags);

/* Tear down a mapped heap; @release is called back on destruction. */
void xnheap_destroy_mapped(xnheap_t *heap,
			   void (*release)(struct xnheap *heap),
			   void __user *mapaddr);

/* Kernel base address of the mapped storage (0 if not mapped). */
#define xnheap_base_memory(heap) \
	((unsigned long)((heap)->archdep.heapbase))

/* Translate a heap pointer to/from an offset within the mapped area. */
#define xnheap_mapped_offset(heap,ptr) \
	(((caddr_t)(ptr)) - (caddr_t)xnheap_base_memory(heap))

#define xnheap_mapped_address(heap,off) \
	((caddr_t)xnheap_base_memory(heap) + (off))

/* A non-zero base address identifies a mapped heap. */
#define xnheap_mapped_p(heap) \
	(xnheap_base_memory(heap) != 0)

#endif /* __KERNEL__ */
/* Public interface. */
/* Initialize @heap over @heapsize bytes at @heapaddr, using @pagesize pages. */
int xnheap_init(xnheap_t *heap,
		void *heapaddr,
		u_long heapsize,
		u_long pagesize);

/* Set the heap's display label (printf-style format and arguments). */
void xnheap_set_label(xnheap_t *heap, const char *name, ...);

/*
 * Destroy @heap; @flushfn is invoked per extent with @cookie so the
 * caller can release the backing memory.
 */
void xnheap_destroy(xnheap_t *heap,
		    void (*flushfn)(xnheap_t *heap,
				    void *extaddr,
				    u_long extsize,
				    void *cookie),
		    void *cookie);

/* Add @extsize bytes at @extaddr to @heap as a new extent. */
int xnheap_extend(xnheap_t *heap,
		  void *extaddr,
		  u_long extsize);

/* Allocate @size bytes from @heap; NOTE(review): presumably returns
   NULL on failure — confirm against heap.c. */
void *xnheap_alloc(xnheap_t *heap,
		   u_long size);

/* Free @block only if @ckfn (when given) validates it first. */
int xnheap_test_and_free(xnheap_t *heap,
			 void *block,
			 int (*ckfn)(void *block));

int xnheap_free(xnheap_t *heap,
		void *block);

/* Queue @block (linked through @link) for deferred release. */
void xnheap_schedule_free(xnheap_t *heap,
			  void *block,
			  xnholder_t *link);
void xnheap_finalize_free_inner(xnheap_t *heap,
				int cpu);

/*
 * Flush the current CPU's deferred-release queue, if non-empty.
 * NOTE(review): the assertion guards against running in an unsafe
 * context; the exact semantics of spltest() are defined elsewhere —
 * confirm before relying on this from new call sites.
 */
static inline void xnheap_finalize_free(xnheap_t *heap)
{
	int cpu = xnarch_current_cpu();

	XENO_ASSERT(NUCLEUS,
		    spltest() != 0,
		    xnpod_fatal("%s called in unsafe context", __FUNCTION__));

	if (heap->idleq[cpu])
		xnheap_finalize_free_inner(heap, cpu);
}

/* Validate that @block belongs to @heap. */
int xnheap_check_block(xnheap_t *heap,
		       void *block);
#ifdef __cplusplus
}
#endif
#endif /* __KERNEL__ || __XENO_SIM__ */
/* Character device used to map heaps to user-space. */
#define XNHEAP_DEV_NAME  "/dev/rtheap"
#define XNHEAP_DEV_MINOR 254

/* Possible arguments to the sys_heap_info syscall */
#define XNHEAP_PROC_PRIVATE_HEAP 0
#define XNHEAP_PROC_SHARED_HEAP  1
#define XNHEAP_SYS_HEAP          2
#define XNHEAP_SYS_STACKPOOL     3

/* Heap information returned by sys_heap_info. */
struct xnheap_desc {
	unsigned long handle;	/* Opaque heap handle */
	unsigned int size;	/* Heap size, in bytes */
	unsigned long area;	/* Base address of the heap area */
	unsigned long used;	/* Bytes currently in use */
};
#endif /* !_XENO_NUCLEUS_HEAP_H */