/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

/*
 * An indirect pointer (root->rnode pointing to a radix_tree_node, rather
 * than a data item) is signalled by the low bit set in the root->rnode
 * pointer.
 *
 * In this case root->height is > 0, but the indirect pointer tests are
 * needed for RCU lookups (because root->height is unreliable). The only
 * time callers need to worry about this is when doing a lookup_slot under
 * RCU.
 *
 * The indirect pointer is in fact also used to tag the last pointer of a
 * node when it is shrunk, before we rcu free the node. See shrink code
 * for details.
 */
#define RADIX_TREE_INDIRECT_PTR		1
/*
 * A common use of the radix tree is to store pointers to struct pages;
 * but shmem/tmpfs also needs to store swap entries in the same tree:
 * those are marked as exceptional entries to distinguish them.
 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
 */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2
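
/*
 * Illustrative sketch only (not part of the API): packing a small
 * integer as an exceptional entry and unpacking it again.  "val" is a
 * hypothetical caller-supplied value small enough to survive the shift.
 *
 *	void *entry = (void *)((val << RADIX_TREE_EXCEPTIONAL_SHIFT) |
 *			       RADIX_TREE_EXCEPTIONAL_ENTRY);
 *	...
 *	if (radix_tree_exceptional_entry(entry))
 *		val = (unsigned long)entry >> RADIX_TREE_EXCEPTIONAL_SHIFT;
 */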

#define RADIX_DAX_MASK	0xf
#define RADIX_DAX_SHIFT	4
#define RADIX_DAX_PTE  (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_PMD  (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK)
#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE)))

static inline int radix_tree_is_indirect_ptr(void *ptr)
{
	return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
}

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAX_TAGS 3

#ifdef __KERNEL__
#define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
#else
#define RADIX_TREE_MAP_SHIFT	3	/* For more stressful testing */
#endif

#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_TAG_LONGS	\
	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))
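/*
 * For example, with BITS_PER_LONG == 64 and RADIX_TREE_MAP_SHIFT == 6,
 * RADIX_TREE_INDEX_BITS is 64 and RADIX_TREE_MAX_PATH is
 * DIV_ROUND_UP(64, 6) == 11 nodes on the deepest possible path.
 */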
88 | ||
89 | /* Height component in node->path */ | |
90 | #define RADIX_TREE_HEIGHT_SHIFT (RADIX_TREE_MAX_PATH + 1) | |
91 | #define RADIX_TREE_HEIGHT_MASK ((1UL << RADIX_TREE_HEIGHT_SHIFT) - 1) | |
92 | ||
93 | /* Internally used bits of node->count */ | |
94 | #define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1) | |
95 | #define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1) | |
96 | ||
struct radix_tree_node {
	unsigned int	path;	/* Offset in parent & height from the bottom */
	unsigned int	count;
	union {
		struct {
			/* Used when ascending tree */
			struct radix_tree_node *parent;
			/* For tree user */
			void *private_data;
		};
		/* Used when freeing node */
		struct rcu_head	rcu_head;
	};
	/* For tree user */
	struct list_head private_list;
	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};
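
/*
 * A sketch of how the two fields packed into ->path can be recovered,
 * given the RADIX_TREE_HEIGHT_* definitions above (illustrative only):
 *
 *	unsigned int height = node->path & RADIX_TREE_HEIGHT_MASK;
 *	unsigned int offset = node->path >> RADIX_TREE_HEIGHT_SHIFT;
 */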
115 | ||
612d6c19 | 116 | /* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ |
1da177e4 LT |
117 | struct radix_tree_root { |
118 | unsigned int height; | |
fd4f2df2 | 119 | gfp_t gfp_mask; |
a1115570 | 120 | struct radix_tree_node __rcu *rnode; |
1da177e4 LT |
121 | }; |
122 | ||
123 | #define RADIX_TREE_INIT(mask) { \ | |
124 | .height = 0, \ | |
125 | .gfp_mask = (mask), \ | |
126 | .rnode = NULL, \ | |
127 | } | |
128 | ||
129 | #define RADIX_TREE(name, mask) \ | |
130 | struct radix_tree_root name = RADIX_TREE_INIT(mask) | |
131 | ||
132 | #define INIT_RADIX_TREE(root, mask) \ | |
133 | do { \ | |
134 | (root)->height = 0; \ | |
135 | (root)->gfp_mask = (mask); \ | |
136 | (root)->rnode = NULL; \ | |
137 | } while (0) | |
138 | ||
/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronization (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes, must be completely handled by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_slot
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 8 functions can be called locklessly, using RCU. The caller must
 * ensure calls to these functions are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the radix tree *and* a synchronize_rcu() grace
 * period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held. Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values. If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged can be called without locking or RCU.
 */

/**
 * radix_tree_deref_slot	- dereference a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference. Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 */
static inline void *radix_tree_deref_slot(void **pslot)
{
	return rcu_dereference(*pslot);
}

/**
 * radix_tree_deref_slot_protected
 *		- dereference a slot without RCU lock but with tree lock held
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * Similar to radix_tree_deref_slot but only used during migration when a
 * page's mapping is being moved. The caller does not hold the RCU read lock
 * but it must hold the tree lock to prevent parallel updates.
 */
static inline void *radix_tree_deref_slot_protected(void **pslot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry	- check radix_tree_deref_slot
 * @arg:	pointer returned by radix_tree_deref_slot
 * Returns:	0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
}

/**
 * radix_tree_exceptional_entry	- radix_tree_deref_slot gave exceptional entry?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if exceptional entry.
 */
static inline int radix_tree_exceptional_entry(void *arg)
{
	/* Not unlikely because radix_tree_exception often tested first */
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

/**
 * radix_tree_exception	- radix_tree_deref_slot returned either exception?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg &
		(RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY));
}

/**
 * radix_tree_replace_slot	- replace item in a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
static inline void radix_tree_replace_slot(void **pslot, void *item)
{
	BUG_ON(radix_tree_is_indirect_ptr(item));
	rcu_assign_pointer(*pslot, item);
}

int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp);
int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
			unsigned order, void *);
static inline int radix_tree_insert(struct radix_tree_root *root,
			unsigned long index, void *entry)
{
	return __radix_tree_insert(root, index, 0, entry);
}
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
bool __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int fromtag, unsigned int totag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
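
/*
 * Sketch of tag usage under a caller-provided lock ("MY_TAG" is a
 * hypothetical tag index, which must be below RADIX_TREE_MAX_TAGS):
 *
 *	spin_lock(&tree_lock);
 *	radix_tree_tag_set(&tree, index, MY_TAG);
 *	spin_unlock(&tree_lock);
 *
 *	if (radix_tree_tagged(&tree, MY_TAG))
 *		...	at least one entry carries MY_TAG ...
 */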

static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	next-to-last index for this chunk
 * @tags:	bit-mask for tag-iterating
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
};

#define RADIX_TREE_ITER_TAG_MASK	0x00FF	/* tag index in lower byte */
#define RADIX_TREE_ITER_TAGGED		0x0100	/* lookup tagged slots */
#define RADIX_TREE_ITER_CONTIG		0x0200	/* stop at first hole */

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
	 * in the case of a successful tagged chunk lookup. If the lookup was
	 * unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * Also it fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and constructs a bit mask for tagged
 * iterating (tags).
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	return NULL;
}

/**
 * radix_tree_iter_next - resume iterating when the chunk may be invalid
 * @iter:	iterator state
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * to continue the iteration from the next index.
 */
static inline __must_check
void **radix_tree_iter_next(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index + 1;
	iter->tags = 0;
	return NULL;
}

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return iter->next_index - iter->index;
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 */
static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (likely(iter->tags & 1ul)) {
			iter->index++;
			return slot + 1;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset;
			iter->index += offset + 1;
			return slot + offset + 1;
		}
	} else {
		long size = radix_tree_chunk_size(iter);

		while (--size > 0) {
			slot++;
			iter->index++;
			if (likely(*slot))
				return slot;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;
}

/**
 * radix_tree_for_each_chunk - iterate over chunks
 *
 * @slot:	the void** variable for pointer to chunk first slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @flags:	RADIX_TREE_ITER_* and tag index
 *
 * Locks can be released and reacquired between iterations.
 */
#define radix_tree_for_each_chunk(slot, root, iter, start, flags)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     (slot = radix_tree_next_chunk(root, iter, flags)) ;)

/**
 * radix_tree_for_each_chunk_slot - iterate over slots in one chunk
 *
 * @slot:	the void** variable, at the beginning points to chunk first slot
 * @iter:	the struct radix_tree_iter pointer
 * @flags:	RADIX_TREE_ITER_*, should be constant
 *
 * This macro is designed to be nested inside radix_tree_for_each_chunk().
 * @slot points to the radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_chunk_slot(slot, iter, flags)		\
	for (; slot ; slot = radix_tree_next_slot(slot, iter, flags))

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))

/**
 * radix_tree_for_each_contig - iterate over contiguous slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_contig(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_CONTIG)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_CONTIG))

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED))

#endif /* _LINUX_RADIX_TREE_H */