/* fs/btrfs/extent_map.c */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"


static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree: tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}
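
/*
 * Usage sketch (illustrative, not part of this file): an extent map tree
 * is typically embedded in a larger per-inode structure and initialized
 * once; the names "my_inode_info" and "my_inode_init" are hypothetical.
 *
 *	struct my_inode_info {
 *		struct extent_map_tree extent_tree;
 *	};
 *
 *	static void my_inode_init(struct my_inode_info *info)
 *	{
 *		extent_map_tree_init(&info->extent_tree);
 *	}
 *
 * All later access goes through tree->lock: read_lock() around lookups,
 * write_lock() around insertion, removal and unpinning.
 */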

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map()
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	em->in_tree = 0;
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	atomic_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}
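
/*
 * Lifecycle sketch (illustrative, not part of this file): the map comes
 * back with one reference, so every successful alloc_extent_map() must be
 * balanced by a free_extent_map() once the caller is done with it.
 *
 *	struct extent_map *em = alloc_extent_map();
 *
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = start;
 *	em->len = len;
 *	em->block_start = EXTENT_MAP_HOLE;
 *	...
 *	free_extent_map(em);
 */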

/**
 * free_extent_map - drop reference count of an extent_map
 * @em: extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		WARN_ON(!list_empty(&em->list));
		kmem_cache_free(extent_map_cache, em);
	}
}

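/*
 * Insert @node into the red-black tree @root, keyed by file offset.  If an
 * existing entry already covers @offset, nothing is inserted and the
 * conflicting node is returned; otherwise the new node is linked in,
 * marked in_tree, and NULL is returned.
 */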
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}

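/*
 * Try to fold the extent maps immediately before and after @em in @tree
 * into @em.  A neighbor that mergable_maps() accepts is absorbed: its
 * range, block range, mod range and generation are pulled into @em, its
 * node is erased from the tree and its reference is dropped.  Callers
 * must hold tree->lock for writing.
 */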
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);

			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		free_extent_map(merge);
	}
}

/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree: tree to unpin the extent in
 * @start: logical offset in the file
 * @len: length of the extent
 * @gen: generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the
 * generation to the generation that actually added the file item to the
 * inode so we know we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_move(&em->list, &tree->modified_extents);
	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

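/*
 * Clear EXTENT_FLAG_LOGGING on @em once the tree-log code is done with it,
 * then retry the merges that mergable_maps() refused while the extent was
 * being logged.  Callers must hold tree->lock for writing.
 */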
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (em->in_tree)
		try_merge_map(tree, em);
}

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree: tree to insert new map in
 * @em: map to insert
 * @modified: indicate whether the given @em should be added to the
 *	      modified list, which indicates the extent needs to be logged
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken to pin it
 * in the tree; any neighboring mappings that are merged away have their
 * references dropped.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);

	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
out:
	return ret;
}
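
/*
 * Call pattern sketch (illustrative, not part of this file): callers take
 * the tree lock for writing and treat -EEXIST as "another thread cached
 * this range first", dropping their own reference and looking the
 * existing mapping up instead.
 *
 *	write_lock(&tree->lock);
 *	ret = add_extent_mapping(tree, em, 0);
 *	write_unlock(&tree->lock);
 *	if (ret == -EEXIST) {
 *		free_extent_map(em);
 *		em = NULL;
 *	}
 */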

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

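/*
 * Core lookup helper: find the extent map covering, or nearest to, the
 * range [start, start + len).  With @strict set, only a map that actually
 * intersects the range is returned; without it the nearest neighbor will
 * do.  The returned map has had its reference count incremented.
 */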
static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	atomic_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
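
/*
 * Lookup sketch (illustrative, not part of this file): readers take the
 * tree lock shared and must drop the reference the lookup took for them.
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		handle em->block_start, em->len, ...
 *		free_extent_map(em);
 *	}
 */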

/**
 * search_extent_mapping - find a nearby extent map
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned instead.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree: extent tree to remove from
 * @em: extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	em->in_tree = 0;
	return ret;
}