#ifndef QEMU_RCU_QUEUE_H
#define QEMU_RCU_QUEUE_H

/*
 * rcu_queue.h
 *
 * RCU-friendly versions of the queue.h primitives.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Copyright (c) 2013 Mike D. Day, IBM Corporation.
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/queue.h"
#include "qemu/atomic.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * List access methods.
 */
#define QLIST_EMPTY_RCU(head) (atomic_read(&(head)->lh_first) == NULL)
#define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first))
#define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next))

/*
 * List functions.
 */

/*
 * The difference between atomic_read/set and atomic_rcu_read/set is
 * that the atomic_rcu_* macros add a read/write memory barrier to the
 * volatile access, while the plain atomic_* macros do not.  It is
 * therefore correct to issue a series of reads or writes to the same
 * element using only the atomic_* macros, as long as the final read or
 * write uses atomic_rcu_* to provide the read or write barrier as
 * appropriate.
 */
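
/* For example, a writer can initialize a new node with plain stores and
 * then publish it with a single atomic_rcu_set(), which supplies the
 * write barrier ('node' and 'global_ptr' are hypothetical names, not
 * part of this header):
 *
 *     node->a = 1;
 *     node->b = 2;
 *     atomic_rcu_set(&global_ptr, node);
 *
 * A reader that loads 'global_ptr' with atomic_rcu_read() is then
 * guaranteed to observe the initialized fields.
 */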

/* Upon publication of the listelm->next value, list readers
 * will see the new node when following next pointers from
 * antecedent nodes, but may not see the new node when following
 * prev pointers from subsequent nodes until after the RCU grace
 * period expires.
 * See linux/include/rculist.h __list_add_rcu(new, prev, next).
 */
#define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do {    \
    (elm)->field.le_next = (listelm)->field.le_next;        \
    (elm)->field.le_prev = &(listelm)->field.le_next;       \
    atomic_rcu_set(&(listelm)->field.le_next, (elm));       \
    if ((elm)->field.le_next != NULL) {                     \
        (elm)->field.le_next->field.le_prev =               \
            &(elm)->field.le_next;                          \
    }                                                       \
} while (/*CONSTCOND*/0)
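
/* Example writer (a minimal sketch; the Foo type, the foo_list head and
 * the foo_lock mutex are hypothetical, not part of this header):
 *
 *     typedef struct Foo {
 *         int value;
 *         QLIST_ENTRY(Foo) node;
 *     } Foo;
 *
 *     QLIST_HEAD(, Foo) foo_list = QLIST_HEAD_INITIALIZER(foo_list);
 *
 *     // updates are serialized against other writers by foo_lock
 *     qemu_mutex_lock(&foo_lock);
 *     QLIST_INSERT_AFTER_RCU(anchor, new_foo, node);
 *     qemu_mutex_unlock(&foo_lock);
 */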

/* Upon publication of the listelm->prev->next value, list
 * readers will see the new element when following prev pointers
 * from subsequent elements, but may not see the new element
 * when following next pointers from antecedent elements
 * until after the RCU grace period expires.
 */
#define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do {   \
    (elm)->field.le_prev = (listelm)->field.le_prev;        \
    (elm)->field.le_next = (listelm);                       \
    atomic_rcu_set((listelm)->field.le_prev, (elm));        \
    (listelm)->field.le_prev = &(elm)->field.le_next;       \
} while (/*CONSTCOND*/0)

/* Upon publication of the head->first value, list readers
 * will see the new element when following the head, but may
 * not see the new element when following prev pointers from
 * subsequent elements until after the RCU grace period has
 * expired.
 */
#define QLIST_INSERT_HEAD_RCU(head, elm, field) do {    \
    (elm)->field.le_prev = &(head)->lh_first;           \
    (elm)->field.le_next = (head)->lh_first;            \
    atomic_rcu_set(&(head)->lh_first, (elm));           \
    if ((elm)->field.le_next != NULL) {                 \
        (elm)->field.le_next->field.le_prev =           \
            &(elm)->field.le_next;                      \
    }                                                   \
} while (/*CONSTCOND*/0)

/* Prior to publication of the elm->prev->next value, some list
 * readers may still see the removed element when following
 * the antecedent's next pointer.
 */
#define QLIST_REMOVE_RCU(elm, field) do {                    \
    if ((elm)->field.le_next != NULL) {                      \
        (elm)->field.le_next->field.le_prev =                \
            (elm)->field.le_prev;                            \
    }                                                        \
    atomic_set((elm)->field.le_prev, (elm)->field.le_next);  \
} while (/*CONSTCOND*/0)
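
/* The removed element may be freed only after a grace period.  A
 * minimal sketch, assuming Foo embeds a struct rcu_head member named
 * 'rcu' (call_rcu() and g_free_rcu() come from qemu/rcu.h):
 *
 *     qemu_mutex_lock(&foo_lock);
 *     QLIST_REMOVE_RCU(doomed, node);
 *     qemu_mutex_unlock(&foo_lock);
 *     g_free_rcu(doomed, rcu);    // frees once all readers are done
 */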

/* List traversal must occur within an RCU critical section. */
#define QLIST_FOREACH_RCU(var, head, field)                 \
    for ((var) = atomic_rcu_read(&(head)->lh_first);        \
         (var);                                             \
         (var) = atomic_rcu_read(&(var)->field.le_next))

/* List traversal must occur within an RCU critical section. */
#define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var)             \
    for ((var) = atomic_rcu_read(&(head)->lh_first);                   \
         (var) &&                                                      \
             ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \
         (var) = (next_var))
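
/* Example reader (a minimal sketch; rcu_read_lock()/rcu_read_unlock()
 * come from qemu/rcu.h, and use() is a hypothetical helper):
 *
 *     Foo *foo;
 *
 *     rcu_read_lock();
 *     QLIST_FOREACH_RCU(foo, &foo_list, node) {
 *         use(foo->value);
 *     }
 *     rcu_read_unlock();
 */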

/*
 * RCU simple queue
 */

/* Simple queue access methods */
#define QSIMPLEQ_EMPTY_RCU(head) (atomic_read(&(head)->sqh_first) == NULL)
#define QSIMPLEQ_FIRST_RCU(head) atomic_rcu_read(&(head)->sqh_first)
#define QSIMPLEQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.sqe_next)

/* Simple queue functions */
#define QSIMPLEQ_INSERT_HEAD_RCU(head, elm, field) do {    \
    (elm)->field.sqe_next = (head)->sqh_first;             \
    if ((elm)->field.sqe_next == NULL) {                   \
        (head)->sqh_last = &(elm)->field.sqe_next;         \
    }                                                      \
    atomic_rcu_set(&(head)->sqh_first, (elm));             \
} while (/*CONSTCOND*/0)

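/* Publication goes through the tail pointer: sqh_last points either to
 * sqh_first (empty queue) or to the last element's sqe_next field, so a
 * single atomic_rcu_set() through it is what makes the new element
 * reachable to readers.
 */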
#define QSIMPLEQ_INSERT_TAIL_RCU(head, elm, field) do {    \
    (elm)->field.sqe_next = NULL;                          \
    atomic_rcu_set((head)->sqh_last, (elm));               \
    (head)->sqh_last = &(elm)->field.sqe_next;             \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_INSERT_AFTER_RCU(head, listelm, elm, field) do {    \
    (elm)->field.sqe_next = (listelm)->field.sqe_next;               \
    if ((elm)->field.sqe_next == NULL) {                             \
        (head)->sqh_last = &(elm)->field.sqe_next;                   \
    }                                                                \
    atomic_rcu_set(&(listelm)->field.sqe_next, (elm));               \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_REMOVE_HEAD_RCU(head, field) do {                     \
    atomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next); \
    if ((head)->sqh_first == NULL) {                                   \
        (head)->sqh_last = &(head)->sqh_first;                         \
    }                                                                  \
} while (/*CONSTCOND*/0)

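/* Removing an interior element requires a linear scan for its
 * antecedent, because simple queue entries carry no back pointer.
 */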
#define QSIMPLEQ_REMOVE_RCU(head, elm, type, field) do {    \
    if ((head)->sqh_first == (elm)) {                       \
        QSIMPLEQ_REMOVE_HEAD_RCU((head), field);            \
    } else {                                                \
        struct type *curr = (head)->sqh_first;              \
        while (curr->field.sqe_next != (elm)) {             \
            curr = curr->field.sqe_next;                    \
        }                                                   \
        atomic_set(&curr->field.sqe_next,                   \
                   curr->field.sqe_next->field.sqe_next);   \
        if (curr->field.sqe_next == NULL) {                 \
            (head)->sqh_last = &(curr)->field.sqe_next;     \
        }                                                   \
    }                                                       \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_FOREACH_RCU(var, head, field)              \
    for ((var) = atomic_rcu_read(&(head)->sqh_first);       \
         (var);                                             \
         (var) = atomic_rcu_read(&(var)->field.sqe_next))

#define QSIMPLEQ_FOREACH_SAFE_RCU(var, head, field, next)                \
    for ((var) = atomic_rcu_read(&(head)->sqh_first);                    \
         (var) && ((next) = atomic_rcu_read(&(var)->field.sqe_next), 1); \
         (var) = (next))
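
/* Example FIFO usage (a minimal sketch; the Job type, the job_queue
 * head and the job_lock mutex are hypothetical):
 *
 *     typedef struct Job {
 *         QSIMPLEQ_ENTRY(Job) next;
 *     } Job;
 *
 *     QSIMPLEQ_HEAD(, Job) job_queue =
 *         QSIMPLEQ_HEAD_INITIALIZER(job_queue);
 *
 *     // writer, serialized by job_lock
 *     QSIMPLEQ_INSERT_TAIL_RCU(&job_queue, job, next);
 *
 *     // reader, between rcu_read_lock() and rcu_read_unlock()
 *     QSIMPLEQ_FOREACH_RCU(job, &job_queue, next) {
 *         ...
 *     }
 */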

/*
 * RCU tail queue
 */

/* Tail queue access methods */
#define QTAILQ_EMPTY_RCU(head) (atomic_read(&(head)->tqh_first) == NULL)
#define QTAILQ_FIRST_RCU(head) atomic_rcu_read(&(head)->tqh_first)
#define QTAILQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.tqe_next)

/* Tail queue functions */
#define QTAILQ_INSERT_HEAD_RCU(head, elm, field) do {           \
    (elm)->field.tqe_next = (head)->tqh_first;                  \
    if ((elm)->field.tqe_next != NULL) {                        \
        (head)->tqh_first->field.tqe_circ.tql_prev =            \
            &(elm)->field.tqe_circ;                             \
    } else {                                                    \
        (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;     \
    }                                                           \
    atomic_rcu_set(&(head)->tqh_first, (elm));                  \
    (elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ;         \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_TAIL_RCU(head, elm, field) do {            \
    (elm)->field.tqe_next = NULL;                                \
    (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev;  \
    atomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm)); \
    (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;          \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_AFTER_RCU(head, listelm, elm, field) do {  \
    (elm)->field.tqe_next = (listelm)->field.tqe_next;           \
    if ((elm)->field.tqe_next != NULL) {                         \
        (elm)->field.tqe_next->field.tqe_circ.tql_prev =         \
            &(elm)->field.tqe_circ;                              \
    } else {                                                     \
        (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;      \
    }                                                            \
    atomic_rcu_set(&(listelm)->field.tqe_next, (elm));           \
    (elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_BEFORE_RCU(listelm, elm, field) do {                \
    (elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev;  \
    (elm)->field.tqe_next = (listelm);                                    \
    atomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm)); \
    (listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ;          \
} while (/*CONSTCOND*/0)

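/* Readers may still see the removed element when following next
 * pointers from the antecedent until a grace period expires; the
 * element itself may be freed only after that.  Its tql_prev pointer
 * is cleared so that stale use of the removed element is easier to
 * catch.
 */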
#define QTAILQ_REMOVE_RCU(head, elm, field) do {                     \
    if ((elm)->field.tqe_next != NULL) {                             \
        (elm)->field.tqe_next->field.tqe_circ.tql_prev =             \
            (elm)->field.tqe_circ.tql_prev;                          \
    } else {                                                         \
        (head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev;  \
    }                                                                \
    atomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next,            \
               (elm)->field.tqe_next);                               \
    (elm)->field.tqe_circ.tql_prev = NULL;                           \
} while (/*CONSTCOND*/0)

#define QTAILQ_FOREACH_RCU(var, head, field)                \
    for ((var) = atomic_rcu_read(&(head)->tqh_first);       \
         (var);                                             \
         (var) = atomic_rcu_read(&(var)->field.tqe_next))

#define QTAILQ_FOREACH_SAFE_RCU(var, head, field, next)                  \
    for ((var) = atomic_rcu_read(&(head)->tqh_first);                    \
         (var) && ((next) = atomic_rcu_read(&(var)->field.tqe_next), 1); \
         (var) = (next))
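
/* Example usage (a minimal sketch; the Dev type, the dev_list head and
 * the dev_lock mutex are hypothetical):
 *
 *     typedef struct Dev {
 *         QTAILQ_ENTRY(Dev) link;
 *     } Dev;
 *
 *     QTAILQ_HEAD(, Dev) dev_list = QTAILQ_HEAD_INITIALIZER(dev_list);
 *
 *     // writer, serialized by dev_lock
 *     QTAILQ_INSERT_TAIL_RCU(&dev_list, dev, link);
 *
 *     // reader, between rcu_read_lock() and rcu_read_unlock()
 *     QTAILQ_FOREACH_RCU(dev, &dev_list, link) {
 *         ...
 *     }
 */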

#ifdef __cplusplus
}
#endif
#endif /* QEMU_RCU_QUEUE_H */