Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _LINUX_LIST_H |
2 | #define _LINUX_LIST_H | |
3 | ||
4 | #ifdef __KERNEL__ | |
5 | ||
6 | #include <linux/stddef.h> | |
7 | #include <linux/prefetch.h> | |
8 | #include <asm/system.h> | |
9 | ||
/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 *
 * NOTE(review): this relies on these addresses being unmapped on
 * every supported architecture -- confirm for new ports.
 */
#define LIST_POISON1 ((void *) 0x00100100)
#define LIST_POISON2 ((void *) 0x00200200)
17 | ||
/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

/*
 * The list head is embedded in the containing object; an empty list
 * is a head whose next/prev both point back at itself.
 */
struct list_head {
	struct list_head *next, *prev;
};

/* Static initializer: the named head points at itself (empty list). */
#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Define and initialize a list head in one statement. */
#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

/* Runtime initializer; do/while(0) makes it safe as a single statement. */
#define INIT_LIST_HEAD(ptr) do { \
	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)
40 | ||
41 | /* | |
42 | * Insert a new entry between two known consecutive entries. | |
43 | * | |
44 | * This is only for internal list manipulation where we know | |
45 | * the prev/next entries already! | |
46 | */ | |
47 | static inline void __list_add(struct list_head *new, | |
48 | struct list_head *prev, | |
49 | struct list_head *next) | |
50 | { | |
51 | next->prev = new; | |
52 | new->next = next; | |
53 | new->prev = prev; | |
54 | prev->next = new; | |
55 | } | |
56 | ||
/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}
82 | ||
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head * new,
		struct list_head * prev, struct list_head * next)
{
	new->next = next;
	new->prev = prev;
	/*
	 * Order the initialization of @new before linking it in, so a
	 * concurrent reader that reaches @new sees valid pointers.
	 */
	smp_wmb();
	next->prev = new;
	prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}
141 | ||
/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	/* Poison both links so any later use faults immediately. */
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
167 | ||
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	/* entry->next is deliberately left intact for concurrent readers. */
	entry->prev = LIST_POISON2;
}
197 | ||
198 | /* | |
199 | * list_replace_rcu - replace old entry by new one | |
200 | * @old : the element to be replaced | |
201 | * @new : the new element to insert | |
202 | * | |
203 | * The old entry will be replaced with the new entry atomically. | |
204 | */ | |
205 | static inline void list_replace_rcu(struct list_head *old, struct list_head *new){ | |
206 | new->next = old->next; | |
207 | new->prev = old->prev; | |
208 | smp_wmb(); | |
209 | new->next->prev = new; | |
210 | new->prev->next = new; | |
211 | } | |
212 | ||
/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	/* Re-point the entry at itself so list_empty(entry) is true. */
	INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}
245 | ||
/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is
 * empty _and_ checks that no other CPU might be
 * in the process of still modifying either member
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 *
 * @head: the list to test.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	/* Both links must agree before we believe the list is empty. */
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}
272 | ||
273 | static inline void __list_splice(struct list_head *list, | |
274 | struct list_head *head) | |
275 | { | |
276 | struct list_head *first = list->next; | |
277 | struct list_head *last = list->prev; | |
278 | struct list_head *at = head->next; | |
279 | ||
280 | first->prev = head; | |
281 | head->next = first; | |
282 | ||
283 | last->next = at; | |
284 | at->prev = last; | |
285 | } | |
286 | ||
/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * Note: @list is left untouched; use list_splice_init() if it
 * will be reused afterwards.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head);
		INIT_LIST_HEAD(list);
	}
}
313 | ||
/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 *
 * NOTE(review): container_of() is not defined by the includes visible
 * in this header -- presumably users pull it in via <linux/kernel.h>;
 * verify.
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * prefetch() hints the next node into cache while the body runs.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
		pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
		pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * @n caches the successor before the body runs, so the body may
 * delete @pos.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)
363 | ||
/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member)			\
	for (pos = list_entry((head)->prev, typeof(*pos), member);	\
	     prefetch(pos->member.prev), &pos->member != (head);	\
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 *			list_for_each_entry_continue
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 *
 * If @pos is NULL the head itself is used, so the continue-iteration
 * starts at the first real entry.
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - iterate over list of given type
 *			continuing after existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_continue(pos, head, member)			\
	for (pos = list_entry(pos->member.next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_continue - iterate over list of given type
 *			continuing after existing point safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member)		\
	for (pos = list_entry(pos->member.next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
434 | ||
1da177e4 LT |
/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * NOTE(review): rcu_dereference() is used below but <linux/rcupdate.h>
 * is not among this header's visible includes -- presumably users
 * include it themselves; verify.
 */
#define list_for_each_rcu(pos, head) \
	for (pos = (head)->next; \
		prefetch(rcu_dereference(pos)->next), pos != (head); \
		pos = pos->next)

/* As list_for_each_rcu() but without prefetching. */
#define __list_for_each_rcu(pos, head) \
	for (pos = (head)->next; \
		rcu_dereference(pos) != (head); \
		pos = pos->next)

/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe
 *					against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
	for (pos = (head)->next; \
		n = rcu_dereference(pos)->next, pos != (head); \
		pos = n)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
		prefetch(rcu_dereference(pos)->member.next), \
			&pos->member != (head); \
		pos = list_entry(pos->member.next, typeof(*pos), member))


/**
 * list_for_each_continue_rcu - iterate over an rcu-protected list
 *			continuing after existing point.
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
	for ((pos) = (pos)->next; \
		prefetch(rcu_dereference((pos))->next), (pos) != (head); \
		(pos) = (pos)->next)
1da177e4 LT |
501 | |
/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
	struct hlist_node *first;
};

/*
 * pprev points at the previous node's next pointer (or at the head's
 * first pointer), which lets deletion work uniformly for all nodes.
 */
struct hlist_node {
	struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)

/* A node is "unhashed" (on no list) iff its pprev back-link is NULL. */
static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}
531 | ||
/* Unlink @n; the pprev back-link makes this work for the first node too. */
static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (next)
		next->pprev = pprev;
}

/*
 * hlist_del - delete @n from its hash list.
 * Both pointers are poisoned so stale use faults immediately;
 * hlist_unhashed() does not return true afterwards.
 */
static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}
547 | ||
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	/* n->next is deliberately left intact for concurrent readers. */
	n->pprev = LIST_POISON2;
}

/* Delete @n (if hashed) and reset it to the unhashed state. */
static inline void hlist_del_init(struct hlist_node *n)
{
	if (n->pprev) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}
580 | ||
581 | static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) | |
582 | { | |
583 | struct hlist_node *first = h->first; | |
584 | n->next = first; | |
585 | if (first) | |
586 | first->pprev = &n->next; | |
587 | h->first = n; | |
588 | n->pprev = &h->first; | |
589 | } | |
590 | ||
591 | ||
/**
 * hlist_add_head_rcu - adds the specified element to the specified hlist,
 * while permitting racing traversals.
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	n->pprev = &h->first;
	/* Publish @n's pointers before making it reachable via @h. */
	smp_wmb();
	if (first)
		first->pprev = &n->next;
	h->first = n;
}
619 | ||
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
					struct hlist_node *next)
{
	/* Take over @next's back-link, then chain @n in front of it. */
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	/* Make @n's predecessor (head or node) point at @n. */
	*(n->pprev) = n;
}
629 | ||
630 | static inline void hlist_add_after(struct hlist_node *n, | |
631 | struct hlist_node *next) | |
632 | { | |
633 | next->next = n->next; | |
634 | n->next = next; | |
635 | next->pprev = &n->next; | |
636 | ||
637 | if(next->next) | |
638 | next->next->pprev = &next->next; | |
639 | } | |
640 | ||
cf4ef014 PM |
/**
 * hlist_add_before_rcu - adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	/* Publish @n's pointers before making it reachable. */
	smp_wmb();
	next->pprev = &n->next;
	*(n->pprev) = n;
}

/**
 * hlist_add_after_rcu - adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
				       struct hlist_node *n)
{
	n->next = prev->next;
	n->pprev = &prev->next;
	/* Publish @n's pointers before making it reachable. */
	smp_wmb();
	prev->next = n;
	if (n->next)
		n->next->pprev = &n->next;
}
691 | ||
1da177e4 LT |
/* Get the containing struct for an hlist_node. */
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

/* Iterate the raw nodes; @pos is a struct hlist_node *. */
#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)

/* Removal-safe variant: @n caches the successor before the body runs. */
#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)

/* RCU-reader variant; must be guarded by rcu_read_lock(). */
#define hlist_for_each_rcu(pos, head) \
	for ((pos) = (head)->first; \
		rcu_dereference((pos)) && ({ prefetch((pos)->next); 1; }); \
		(pos) = (pos)->next)
1da177e4 LT |
706 | |
/**
 * hlist_for_each_entry	- iterate over list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member)			 \
	for (pos = (head)->first;					 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member)		 \
	for (pos = (pos)->next;						 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member)			 \
	for (; pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member)		 \
	for (pos = (head)->first;					 \
	     pos && ({ n = pos->next; 1; }) &&				 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)
756 | ||
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member)		 \
	for (pos = (head)->first;					 \
	     rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) &&	 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
1da177e4 LT |
773 | |
774 | #else | |
775 | #warning "don't include kernel headers in userspace" | |
776 | #endif /* __KERNEL__ */ | |
777 | #endif |