#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/hardirq.h>
#include "extent_map.h"
/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));
static struct kmem_cache *extent_map_cache;
int __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}
void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 * @mask:	flags for memory allocations during tree operations
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map.rb_node = NULL;
	spin_lock_init(&tree->lock);
}
EXPORT_SYMBOL(extent_map_tree_init);
/**
 * alloc_extent_map - allocate new extent map structure
 * @mask:	memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	em->flags = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);
/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	/* walk down to the insertion point, refusing overlapping offsets */
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * Search through the tree for an extent_map at the given offset.  If
 * it can't be found, try to find some neighboring extents.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		/* step forward to the first extent ending past the offset */
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		/* step backward to the first extent starting at or before it */
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
/*
 * Look for an offset in the tree and, if it can't be found, return
 * the closest neighboring extent instead.
 */
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;

	ret = __tree_search(root, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}
/* check whether two adjacent extent_map structs are safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
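/*
 * Illustrative sketch (hypothetical values): two 4k mappings that are
 * contiguous both in file offset and on disk, e.g.
 *
 *	prev: start 0,    len 4096, block_start 1000000
 *	next: start 4096, len 4096, block_start 1004096
 *
 * satisfy extent_map_end(prev) == next->start and
 * extent_map_block_end(prev) == next->block_start, so
 * mergable_maps(prev, next) returns 1 and add_extent_mapping() below
 * would collapse them into one 8k mapping, assuming equal flags and
 * bdev and prev not pinned.
 */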
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	assert_spin_locked(&tree->lock);
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		free_extent_map(merge);
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
out:
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
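/*
 * For example, range_end(0xffffffffffffff00ULL, 0x200) overflows and
 * clamps to (u64)-1 instead of wrapping around to a small offset.
 */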
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	assert_spin_locked(&tree->lock);
	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
out:
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);
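/*
 * Illustrative sketch (hypothetical caller): look up a mapping under the
 * lock, then verify it actually covers the offset of interest, since the
 * returned map is only guaranteed to intersect [start, start + len).
 *
 *	spin_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	spin_unlock(&tree->lock);
 *	if (em && em->start <= start && start < extent_map_end(em))
 *		...			// em covers 'start'; use it
 *	if (em)
 *		free_extent_map(em);	// drop the lookup reference
 */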
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	assert_spin_locked(&tree->lock);
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);