/usr/include/urcu/static/lfstack.h is in liburcu-dev 0.9.1-3.
This file is owned by root:root, with mode 0o644.
#ifndef _URCU_STATIC_LFSTACK_H
#define _URCU_STATIC_LFSTACK_H
/*
* urcu/static/lfstack.h
*
* Userspace RCU library - Lock-Free Stack
*
* Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/lfstack.h for
* linking dynamically with the userspace rcu library.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Lock-free stack.
*
* Stack implementing push, pop, and pop_all operations, as well as
* iteration over the stack head returned by pop_all.
*
* Synchronization table:
*
* The external synchronization techniques described in the API below
* are required between pairs marked with "X". No external
* synchronization is required between pairs marked with "-".
*
*                     cds_lfs_push  __cds_lfs_pop  __cds_lfs_pop_all
* cds_lfs_push             -              -                -
* __cds_lfs_pop            -              X                X
* __cds_lfs_pop_all        -              X                -
*
* cds_lfs_pop_blocking and cds_lfs_pop_all_blocking use an internal
* mutex to provide synchronization.
*/
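/*
* Editor's illustrative sketch (an addition, not part of the upstream
* header), assuming the public wrappers declared in urcu/lfstack.h. A
* cds_lfs_node is typically embedded in a caller-defined structure, and
* caa_container_of() recovers the enclosing object after a pop:
*
*	struct myobj {			// hypothetical user structure
*		struct cds_lfs_node node;
*		int value;
*	};
*
*	struct cds_lfs_stack stack;
*	struct myobj *obj;
*	struct cds_lfs_node *snode;
*
*	cds_lfs_init(&stack);
*	obj = malloc(sizeof(*obj));
*	cds_lfs_node_init(&obj->node);
*	obj->value = 42;
*	cds_lfs_push(&stack, &obj->node);
*	snode = cds_lfs_pop_blocking(&stack);
*	if (snode)
*		obj = caa_container_of(snode, struct myobj, node);
*/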
/*
* cds_lfs_node_init: initialize lock-free stack node.
*/
static inline
void _cds_lfs_node_init(struct cds_lfs_node *node)
{
	/* Nothing to initialize: node->next is set on push. */
}
/*
* cds_lfs_init: initialize lock-free stack.
*/
static inline
void _cds_lfs_init(struct cds_lfs_stack *s)
{
int ret;
s->head = NULL;
ret = pthread_mutex_init(&s->lock, NULL);
assert(!ret);
}
/*
* ___cds_lfs_init: initialize lock-free stack.
*/
static inline
void ___cds_lfs_init(struct __cds_lfs_stack *s)
{
s->head = NULL;
}
static inline
bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
{
return head == NULL;
}
/*
* cds_lfs_empty: return whether lock-free stack is empty.
*
* No memory barrier is issued. No mutual exclusion is required.
*/
static inline
bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
{
return ___cds_lfs_empty_head(CMM_LOAD_SHARED(s._s->head));
}
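/*
* Editor's note with a minimal sketch: since cds_lfs_empty() issues no
* memory barrier, its result is only a snapshot that may be stale by
* the time the caller acts on it. Treat it as a hint (e.g. to skip a
* pop attempt), never as a guarantee:
*
*	if (!cds_lfs_empty(&stack))
*		snode = cds_lfs_pop_blocking(&stack); // may still be NULL
*/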
/*
* cds_lfs_push: push a node into the stack.
*
* Does not require any synchronization with other push or pop
* operations.
*
* Lock-free stack push is not subject to ABA problem, so no need to
* take the RCU read-side lock. Even if "head" changes between two
* uatomic_cmpxchg() invocations here (being popped, and then pushed
* again by one or more concurrent threads), the second
* uatomic_cmpxchg() invocation only cares about pushing a new entry at
* the head of the stack, ensuring consistency by making sure the new
* node->next is the same pointer value as the value replaced as head.
* It does not care about the content of the actual next node, so it can
* very well be reallocated between the two uatomic_cmpxchg().
*
* We take the approach of expecting the stack to be usually empty, so
* we first try an initial uatomic_cmpxchg() on a NULL old_head, and
* retry if the old head was non-NULL (the value read by the first
* uatomic_cmpxchg() is used as old head for the following loop). The
* upside of this scheme is to minimize the amount of cacheline traffic,
* always performing an exclusive cacheline access, rather than doing
* non-exclusive followed by exclusive cacheline access (which would be
* required if we first read the old head value). This design decision
* might be revisited after more thorough benchmarking on various
* platforms.
*
* Returns false if the stack was empty prior to adding the node,
* true otherwise.
*/
static inline
bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
struct cds_lfs_node *node)
{
struct __cds_lfs_stack *s = u_s._s;
struct cds_lfs_head *head = NULL;
struct cds_lfs_head *new_head =
caa_container_of(node, struct cds_lfs_head, node);
for (;;) {
struct cds_lfs_head *old_head = head;
/*
* node->next is still private at this point, no need to
* perform a _CMM_STORE_SHARED().
*/
node->next = &head->node;
/*
* uatomic_cmpxchg() implicit memory barrier orders earlier
* stores to node before publication.
*/
head = uatomic_cmpxchg(&s->head, old_head, new_head);
if (old_head == head)
break;
}
return !___cds_lfs_empty_head(head);
}
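/*
* Editor's sketch of one common use of the return value (an assumed
* pattern, not prescribed by this header): because push reports whether
* the stack was empty beforehand, a producer can limit wakeups to the
* empty-to-non-empty transition:
*
*	if (!cds_lfs_push(&stack, &obj->node))
*		wake_consumer();	// hypothetical notification helper
*/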
/*
* __cds_lfs_pop: pop a node from the stack.
*
* Returns NULL if stack is empty.
*
* __cds_lfs_pop needs to be synchronized using one of the following
* techniques:
*
* 1) Calling __cds_lfs_pop under rcu read lock critical section.
* Both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
* grace period to pass before freeing the returned node or pushing
* the node back into the stack. It is valid to overwrite the content
* of cds_lfs_node immediately after __cds_lfs_pop and
* __cds_lfs_pop_all. No RCU read-side critical section is needed
* around __cds_lfs_pop_all.
* 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop
* and __cds_lfs_pop_all callers.
* 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
* __cds_lfs_pop_all(). (multi-provider/single-consumer scheme).
*/
static inline
struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s)
{
struct __cds_lfs_stack *s = u_s._s;
for (;;) {
struct cds_lfs_head *head, *next_head;
struct cds_lfs_node *next;
head = _CMM_LOAD_SHARED(s->head);
if (___cds_lfs_empty_head(head))
return NULL; /* Empty stack */
/*
* Read head before head->next. Matches the implicit
* memory barrier before uatomic_cmpxchg() in
* cds_lfs_push.
*/
cmm_smp_read_barrier_depends();
next = _CMM_LOAD_SHARED(head->node.next);
next_head = caa_container_of(next,
struct cds_lfs_head, node);
if (uatomic_cmpxchg(&s->head, head, next_head) == head)
return &head->node;
/* busy-loop if head changed under us */
}
}
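/*
* Editor's sketch of synchronization technique 1 above, assuming one of
* the urcu flavor headers (e.g. urcu.h) is in use: pop under the RCU
* read-side lock, then wait for a grace period before freeing the node:
*
*	rcu_read_lock();
*	snode = __cds_lfs_pop(&stack);
*	rcu_read_unlock();
*	if (snode) {
*		synchronize_rcu();
*		free(caa_container_of(snode, struct myobj, node));
*	}
*/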
/*
* __cds_lfs_pop_all: pop all nodes from a stack.
*
* __cds_lfs_pop_all does not require any synchronization with other
* push, nor with other __cds_lfs_pop_all, but requires synchronization
* matching the technique used to synchronize __cds_lfs_pop:
*
* 1) If __cds_lfs_pop is called under rcu read lock critical section,
* both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
* grace period to pass before freeing the returned node or pushing
* the node back into the stack. It is valid to overwrite the content
* of cds_lfs_node immediately after __cds_lfs_pop and
* __cds_lfs_pop_all. No RCU read-side critical section is needed
* around __cds_lfs_pop_all.
* 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop and
* __cds_lfs_pop_all callers.
* 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
* __cds_lfs_pop_all(). (multi-provider/single-consumer scheme).
*/
static inline
struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
{
struct __cds_lfs_stack *s = u_s._s;
/*
* Implicit memory barrier after uatomic_xchg() matches implicit
* memory barrier before uatomic_cmpxchg() in cds_lfs_push. It
* ensures that all nodes of the returned list are consistent.
* There is no need to issue memory barriers when iterating over
* the returned list: the full memory barriers issued prior to each
* uatomic_cmpxchg() that writes to head order the stores to each
* node before the full memory barrier following this
* uatomic_xchg().
*/
return uatomic_xchg(&s->head, NULL);
}
/*
* cds_lfs_pop_lock: lock stack pop-protection mutex.
*/
static inline void _cds_lfs_pop_lock(struct cds_lfs_stack *s)
{
int ret;
ret = pthread_mutex_lock(&s->lock);
assert(!ret);
}
/*
* cds_lfs_pop_unlock: unlock stack pop-protection mutex.
*/
static inline void _cds_lfs_pop_unlock(struct cds_lfs_stack *s)
{
int ret;
ret = pthread_mutex_unlock(&s->lock);
assert(!ret);
}
/*
* Call __cds_lfs_pop with an internal pop mutex held.
*/
static inline
struct cds_lfs_node *
_cds_lfs_pop_blocking(struct cds_lfs_stack *s)
{
struct cds_lfs_node *retnode;
_cds_lfs_pop_lock(s);
retnode = ___cds_lfs_pop(s);
_cds_lfs_pop_unlock(s);
return retnode;
}
/*
* Call __cds_lfs_pop_all with an internal pop mutex held.
*/
static inline
struct cds_lfs_head *
_cds_lfs_pop_all_blocking(struct cds_lfs_stack *s)
{
struct cds_lfs_head *rethead;
_cds_lfs_pop_lock(s);
rethead = ___cds_lfs_pop_all(s);
_cds_lfs_pop_unlock(s);
return rethead;
}
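/*
* Editor's sketch of the mutex-protected convenience path, via the
* public cds_lfs_pop_blocking() wrapper from urcu/lfstack.h (assumed
* here): multiple consumers may call it concurrently with no external
* synchronization, at the cost of taking the stack's internal pop
* mutex on each call:
*
*	struct cds_lfs_node *snode;
*
*	while ((snode = cds_lfs_pop_blocking(&stack)) != NULL)
*		process(caa_container_of(snode, struct myobj, node));
*		// process() is a hypothetical consumer handler
*/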
#ifdef __cplusplus
}
#endif
#endif /* _URCU_STATIC_LFSTACK_H */