#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#ifdef __KERNEL__

#include <linux/stddef.h>
#include <linux/poison.h>
#include <linux/prefetch.h>
#include <asm/system.h>

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
        struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
        list->next = list;
        list->prev = list;
}

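/*
 * Usage sketch (illustrative only, not compiled): a list is anchored by a
 * standalone struct list_head and threaded through a struct list_head
 * member embedded in each element. The "struct foo"/"foo_list" names
 * below are hypothetical, not part of this header.
 */
#if 0
struct foo {
        int value;
        struct list_head node;          /* links this foo into a list */
};

static LIST_HEAD(foo_list);             /* compile-time initialization */

static void foo_list_setup(struct list_head *head)
{
        INIT_LIST_HEAD(head);           /* run-time initialization */
}
#endif
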
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}
#else
extern void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next);
#endif

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_add(struct list_head *new, struct list_head *head)
{
        __list_add(new, head, head->next);
}
#else
extern void list_add(struct list_head *new, struct list_head *head);
#endif


/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
        __list_add(new, head->prev, head);
}

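/*
 * Usage sketch (illustrative only, not compiled; continues the hypothetical
 * struct foo/foo_list example above): list_add() pushes at the head, giving
 * LIFO/stack order, while list_add_tail() appends just before the head,
 * giving FIFO/queue order.
 */
#if 0
static void foo_push(struct foo *f)
{
        list_add(&f->node, &foo_list);          /* LIFO: newest at front */
}

static void foo_enqueue(struct foo *f)
{
        list_add_tail(&f->node, &foo_list);     /* FIFO: oldest at front */
}
#endif
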
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head * new,
                struct list_head * prev, struct list_head * next)
{
        new->next = next;
        new->prev = prev;
        smp_wmb();
        next->prev = new;
        prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
        __list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                        struct list_head *head)
{
        __list_add_rcu(new, head->prev, head);
}

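/*
 * Usage sketch for the RCU add primitives (illustrative only, not compiled;
 * the "foo_lock"/"foo_list"/"struct foo" names are hypothetical). Writers
 * still need mutual exclusion among themselves, here a spinlock from
 * <linux/spinlock.h>; only the readers go lockless.
 */
#if 0
static DEFINE_SPINLOCK(foo_lock);

static void foo_publish(struct foo *f)
{
        spin_lock(&foo_lock);           /* serialize against other updaters */
        list_add_rcu(&f->node, &foo_list);
        spin_unlock(&foo_lock);
}
#endif
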
/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
        next->prev = prev;
        prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->next = LIST_POISON1;
        entry->prev = LIST_POISON2;
}
#else
extern void list_del(struct list_head *entry);
#endif

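/*
 * Usage sketch (illustrative only, not compiled; hypothetical names): after
 * list_del() the entry is unlinked and its pointers are poisoned, so it must
 * not be traversed or re-used without being re-added. Freeing it with
 * kfree() from <linux/slab.h> is fine once nothing else references it.
 */
#if 0
static void foo_remove(struct foo *f)
{
        list_del(&f->node);     /* unlink; f->node is now poisoned */
        kfree(f);
}
#endif
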
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->prev = LIST_POISON2;
}

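/*
 * Usage sketch (illustrative only, not compiled; hypothetical names): an
 * updater removes the entry under its own lock with list_del_rcu(), then
 * waits for a grace period before freeing it, so readers still walking the
 * old pointers stay safe. call_rcu() from <linux/rcupdate.h> could be used
 * instead of blocking in synchronize_rcu().
 */
#if 0
static void foo_unpublish(struct foo *f)
{
        spin_lock(&foo_lock);
        list_del_rcu(&f->node);         /* existing readers may still see f */
        spin_unlock(&foo_lock);

        synchronize_rcu();              /* wait for pre-existing readers */
        kfree(f);                       /* now nothing can reference f */
}
#endif
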
/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * If @old was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
                                struct list_head *new)
{
        new->next = old->next;
        new->next->prev = new;
        new->prev = old->prev;
        new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old,
                                        struct list_head *new)
{
        list_replace(old, new);
        INIT_LIST_HEAD(old);
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
                                struct list_head *new)
{
        new->next = old->next;
        new->prev = old->prev;
        smp_wmb();
        new->next->prev = new;
        new->prev->next = new;
        old->prev = LIST_POISON2;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add_tail(list, head);
}

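/*
 * Usage sketch (illustrative only, not compiled; the "pending"/"done" list
 * names are hypothetical): list_move_tail() unlinks an entry from whatever
 * list it is currently on and appends it to another one, which is handy for
 * moving items between per-state lists without a separate del/add pair.
 */
#if 0
static LIST_HEAD(pending);
static LIST_HEAD(done);

static void foo_complete(struct foo *f)
{
        list_move_tail(&f->node, &done);        /* pending -> done */
}
#endif
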
/**
 * list_is_last - tests whether @list is the last entry in list @head
 * @list: the entry to test
 * @head: the head of the list
 */
static inline int list_is_last(const struct list_head *list,
                                const struct list_head *head)
{
        return list->next == head;
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
        return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
 * Description:
 * tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of modifying either member (next or prev)
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). E.g. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
        struct list_head *next = head->next;
        return (next == head) && (next == head->prev);
}

static inline void __list_splice(struct list_head *list,
                                 struct list_head *head)
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;
        struct list_head *at = head->next;

        first->prev = head;
        head->next = first;

        last->next = at;
        at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
        if (!list_empty(list))
                __list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised.
 */
static inline void list_splice_init(struct list_head *list,
                                    struct list_head *head)
{
        if (!list_empty(list)) {
                __list_splice(list, head);
                INIT_LIST_HEAD(list);
        }
}

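/*
 * Usage sketch (illustrative only, not compiled; hypothetical names): a
 * common pattern is to grab everything queued on a shared list in O(1)
 * under a lock with list_splice_init(), then process the private batch
 * without holding the lock.
 */
#if 0
static void foo_drain(void)
{
        LIST_HEAD(batch);               /* on-stack private list head */
        struct foo *f, *tmp;

        spin_lock(&foo_lock);
        list_splice_init(&foo_list, &batch);    /* foo_list is now empty */
        spin_unlock(&foo_lock);

        list_for_each_entry_safe(f, tmp, &batch, node) {
                list_del(&f->node);
                kfree(f);
        }
}
#endif
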
/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @head: the place in the list to splice the first list into
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * @head can be RCU-read traversed concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to
 * prevent any other updates to @head. In principle, it is possible
 * to modify the list as soon as sync() begins execution.
 * If this sort of thing becomes necessary, an alternative version
 * based on call_rcu() could be created. But only if -really-
 * needed -- there is no shortage of RCU API members.
 */
static inline void list_splice_init_rcu(struct list_head *list,
                                        struct list_head *head,
                                        void (*sync)(void))
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;
        struct list_head *at = head->next;

        /* There is nothing to splice if the source list is empty. */
        if (list_empty(list))
                return;

        /* "first" and "last" track the source list, so initialize it. */

        INIT_LIST_HEAD(list);

        /*
         * At this point, the list body still points to the source list.
         * Wait for any readers to finish using the list before splicing
         * the list body into the new list. Any new readers will see
         * an empty list.
         */

        sync();

        /*
         * Readers are finished with the source list, so perform splice.
         * The order is important if the new list is global and accessible
         * to concurrent RCU readers. Note that RCU readers are not
         * permitted to traverse the prev pointers without excluding
         * this function.
         */

        last->next = at;
        smp_wmb();
        head->next = first;
        first->prev = head;
        at->prev = last;
}

/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 */
#define list_entry(ptr, type, member) \
        container_of(ptr, type, member)

/**
 * list_first_entry - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that the list is expected not to be empty.
 */
#define list_first_entry(ptr, type, member) \
        list_entry((ptr)->next, type, member)

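/*
 * Usage sketch (illustrative only, not compiled; hypothetical names):
 * list_entry() is just container_of(), mapping a struct list_head back to
 * the structure it is embedded in. list_first_entry() must only be used on
 * a list known to be non-empty, hence the list_empty() check here.
 */
#if 0
static struct foo *foo_peek(struct list_head *head)
{
        if (list_empty(head))
                return NULL;
        return list_first_entry(head, struct foo, node);
}
#endif
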
/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
        for (pos = (head)->next; prefetch(pos->next), pos != (head); \
                pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
        for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
                pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop cursor.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = n, n = pos->next)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member); \
             prefetch(pos->member.prev), &pos->member != (head); \
             pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_head within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
 */
#define list_prepare_entry(pos, head, member) \
        ((pos) ? : list_entry(head, typeof(*pos), member))

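/*
 * Usage sketch (illustrative only, not compiled; hypothetical names):
 * list_prepare_entry() turns a possibly-NULL "last visited" cursor into
 * something list_for_each_entry_continue() can start from, so a scan can
 * resume after the point reached last time, or from the beginning when
 * the cursor is still NULL. The cursor entry must remain on the list.
 */
#if 0
static struct foo *foo_scan_from(struct foo *last)
{
        struct foo *f;

        f = list_prepare_entry(last, &foo_list, node);
        list_for_each_entry_continue(f, &foo_list, node) {
                if (f->value > 0)
                        return f;       /* caller keeps f as the new cursor */
        }
        return NULL;
}
#endif
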
/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue(pos, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Iterate over list of given type, continuing from current position.
 */
#define list_for_each_entry_from(pos, head, member) \
        for (; prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

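/*
 * Usage sketch (illustrative only, not compiled; hypothetical names): the
 * _safe variant caches the next entry in "tmp" before the loop body runs,
 * so the current entry may be unlinked and freed inside the loop.
 */
#if 0
static void foo_reap(struct list_head *head, int limit)
{
        struct foo *f, *tmp;

        list_for_each_entry_safe(f, tmp, head, node) {
                if (f->value < limit) {
                        list_del(&f->node);
                        kfree(f);
                }
        }
}
#endif
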
/**
 * list_for_each_entry_safe_continue - continue list iteration safe against removal
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from - iterate over list from current point safe against removal
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) \
        for (n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member), \
                n = list_entry(pos->member.prev, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.prev, typeof(*n), member))

/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
        for (pos = (head)->next; \
                prefetch(rcu_dereference(pos)->next), pos != (head); \
                pos = pos->next)

#define __list_for_each_rcu(pos, head) \
        for (pos = (head)->next; \
                rcu_dereference(pos) != (head); \
                pos = pos->next)

/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe against removal
 * @pos: the &struct list_head to use as a loop cursor.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * Iterate over an rcu-protected list, safe against removal of list entry.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
        for (pos = (head)->next; \
                n = rcu_dereference(pos)->next, pos != (head); \
                pos = n)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); \
                prefetch(rcu_dereference(pos)->member.next), \
                        &pos->member != (head); \
                pos = list_entry(pos->member.next, typeof(*pos), member))


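/*
 * Usage sketch (illustrative only, not compiled; hypothetical names): the
 * read side that pairs with the list_add_rcu()/list_del_rcu() sketches
 * above. The traversal must be covered by rcu_read_lock()/rcu_read_unlock()
 * from <linux/rcupdate.h>, and an entry must not be dereferenced after
 * rcu_read_unlock() unless a reference was taken on it.
 */
#if 0
static int foo_lookup(int value)
{
        struct foo *f;
        int found = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(f, &foo_list, node) {
                if (f->value == value) {
                        found = 1;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}
#endif
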
/**
 * list_for_each_continue_rcu - continue iteration over an rcu-protected list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * Iterate over an rcu-protected list, continuing after current point.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
        for ((pos) = (pos)->next; \
                prefetch(rcu_dereference((pos))->next), (pos) != (head); \
                (pos) = (pos)->next)

/*
 * Doubly linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
        struct hlist_node *first;
};

struct hlist_node {
        struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
        h->next = NULL;
        h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
        return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
        return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
        struct hlist_node *next = n->next;
        struct hlist_node **pprev = n->pprev;
        *pprev = next;
        if (next)
                next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
        __hlist_del(n);
        n->next = LIST_POISON1;
        n->pprev = LIST_POISON2;
}

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
        if (!hlist_unhashed(n)) {
                __hlist_del(n);
                INIT_HLIST_NODE(n);
        }
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
                                        struct hlist_node *new)
{
        struct hlist_node *next = old->next;

        new->next = next;
        new->pprev = old->pprev;
        smp_wmb();
        if (next)
                new->next->pprev = &new->next;
        *new->pprev = new;
        old->pprev = LIST_POISON2;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        if (first)
                first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}


/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                        struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        n->pprev = &h->first;
        smp_wmb();
        if (first)
                first->pprev = &n->next;
        h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        next->pprev = &n->next;
        *(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
                                        struct hlist_node *next)
{
        next->next = n->next;
        n->next = next;
        next->pprev = &n->next;

        if (next->next)
                next->next->pprev = &next->next;
}

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        smp_wmb();
        next->pprev = &n->next;
        *(n->pprev) = n;
}

/**
 * hlist_add_after_rcu
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
                                       struct hlist_node *n)
{
        n->next = prev->next;
        n->pprev = &prev->next;
        smp_wmb();
        prev->next = n;
        if (n->next)
                n->next->pprev = &n->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

#define hlist_for_each(pos, head) \
        for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
             pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
        for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
             pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
        for (pos = (head)->first; \
             pos && ({ prefetch(pos->next); 1;}) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

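/*
 * Usage sketch (illustrative only, not compiled): hlist heads are half the
 * size of list heads, so they are the usual choice for hash-table buckets.
 * hlist_for_each_entry() needs both a typed cursor and a raw struct
 * hlist_node cursor. The names below are hypothetical and assume the
 * earlier struct foo also embeds a "struct hlist_node hnode" member.
 */
#if 0
#define FOO_HASH_BITS   6
#define FOO_HASH_SIZE   (1 << FOO_HASH_BITS)

static struct hlist_head foo_hash[FOO_HASH_SIZE];

static void foo_hash_insert(struct foo *f)
{
        hlist_add_head(&f->hnode, &foo_hash[f->value % FOO_HASH_SIZE]);
}

static struct foo *foo_hash_lookup(int value)
{
        struct foo *f;
        struct hlist_node *pos;

        hlist_for_each_entry(f, pos, &foo_hash[value % FOO_HASH_SIZE], hnode) {
                if (f->value == value)
                        return f;
        }
        return NULL;
}
#endif
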
/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
        for (pos = (pos)->next; \
             pos && ({ prefetch(pos->next); 1;}) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
        for (; pos && ({ prefetch(pos->next); 1;}) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
        for (pos = (head)->first; \
             pos && ({ n = pos->next; 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = n)

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
        for (pos = (head)->first; \
             rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */
#endif