/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */

#include <linux/rbtree.h>
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"
#include "extents_status.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion in the Ext4 Developer Workshop, we
 * will introduce a new structure called io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. Reservation space warning), and provide extent-level locking.
 * The delay extent tree is the first step to achieve this goal.  It was
 * originally built by Yongqiang Yang.  At that time it was called the
 * delay extent tree, whose goal was only to track delayed extents in
 * memory to simplify the implementation of fiemap and bigalloc, and to
 * introduce lseek SEEK_DATA/SEEK_HOLE support.  That is why it is still
 * called the delay extent tree in the first commit.  But to better
 * describe what it does, it has been renamed to the extent status tree.
 *
 * Currently the first step has been done.  All delayed extents are
 * tracked in the tree.  The tree maintains a delayed extent from the
 * moment a delayed allocation is issued until the delayed extent is
 * written out or invalidated.  Therefore the implementation of fiemap
 * and bigalloc is simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future works.
 *
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree.  Hence, the single extent cache can be
 * removed because the extent status tree can do a better job.  Extents
 * in the status tree are loaded on-demand, so the extent status tree
 * may not contain all of the extents in a file.  Meanwhile we define a
 * shrinker to reclaim memory from the extent status tree because a
 * fragmented extent tree will make the status tree cost too much
 * memory.  Written/unwritten/hole extents in the tree will be reclaimed
 * by this shrinker when we are under high memory pressure.  Delayed
 * extents will not be reclaimed because fiemap, bigalloc, and
 * seek_data/hole need them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement an extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache; this leads to complicated, buggy, and
 * inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
 * block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they do it without the extent status tree.
 *
 *   --	FIEMAP
 *	FIEMAP looks up the page cache to identify delayed allocations
 *	from holes.
 *
 *   --	SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 *   --	bigalloc
 *	bigalloc looks up the page cache to figure out if a block is
 *	already under delayed allocation or not to determine whether
 *	quota reserving is needed for the cluster.
 *
 *   --	writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped.  If there are not very many delayed buffers, then it is
 *	time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out if a block or a range of
 * blocks is under delayed allocation (belongs to a delayed extent) or
 * not by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 *   --	extent
 *	An extent is a range of blocks which are contiguous logically and
 *	physically.  Unlike the extent in the extent tree, this extent in
 *	ext4 is an in-memory struct; there is no corresponding on-disk
 *	data.  There is no limit on the length of an extent, so an extent
 *	can contain as many blocks as they are contiguous logically and
 *	physically.
 *
 *   --	extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with different status.  The extents in the
 *	tree are ordered by logical block no.
 *
 *   --	operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks),
 *	and removing an extent.
 *
 *   --	race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 *   --	memory consumption
 *	A fragmented extent tree will make the extent status tree cost
 *	too much memory.  Hence, we will reclaim written/unwritten/hole
 *	extents from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 *   --	overhead
 *	1. There is a cached extent for write access, so if writes are
 *	not very random, space-adding operations are done in O(1) time.
 *
 *   --	gain
 *	2. Code is much simpler, more readable, more maintainable and
 *	more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   -- Refactor delayed space reservation
 *
 *   -- Extent-level locking
 */
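
/*
 * Illustration (added; not part of the original file): a typical read-side
 * user of the status tree asks about one logical block roughly like this,
 * using only functions defined in this file and in extents_status.h
 * ("delayed" is a hypothetical local):
 *
 *	struct extent_status es;
 *	int delayed = 0;
 *
 *	if (ext4_es_lookup_extent(inode, lblk, &es))
 *		delayed = ext4_es_is_delayed(&es);
 */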

static struct kmem_cache *ext4_es_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end);
static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
				       int nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);

int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;

		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}
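
/*
 * Worked example (added for clarity): an extent with es_lblk == 100 and
 * es_len == 8 covers logical blocks [100, 107], so ext4_es_end() returns
 * 107.  The tree stores closed intervals, which is why the search below
 * compares lblk against ext4_es_end(es) rather than es_lblk + es_len.
 */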

/*
 * Search through the tree for a delayed extent with a given offset.  If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}

/*
 * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
 * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
 *
 * @inode: the inode which owns delayed extents
 * @lblk: the offset where we start to search
 * @end: the offset where we stop to search
 * @es: delayed extent that we found
 */
void ext4_es_find_delayed_extent_range(struct inode *inode,
				       ext4_lblk_t lblk, ext4_lblk_t end,
				       struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	BUG_ON(es == NULL);
	BUG_ON(end < lblk);
	trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;

	/* find extent in cache firstly */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %x\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !ext4_es_is_delayed(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (ext4_es_is_delayed(es1))
				break;
		}
	}

	if (es1 && ext4_es_is_delayed(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_delayed_extent_range_exit(inode, es);
}
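
/*
 * Add the inode to the per-superblock list that __es_shrink() walks
 * round-robin.  The unlocked list_empty() check is just an optimization;
 * the final decision is re-made under s_es_lock.
 */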

void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}

void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}
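
/*
 * Allocate and initialize an extent_status object and account it in the
 * per-inode and per-superblock counters.  Delayed extents are excluded
 * from the reclaimable (shk) count because the shrinker never frees them.
 */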

static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;

	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim them
	 */
	if (!ext4_es_is_delayed(es)) {
		EXT4_I(inode)->i_es_shk_nr++;
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		EXT4_I(inode)->i_es_shk_nr--;
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged
 * Condition:
 *  - logical block number is contiguous
 *  - physical block number is contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_status(es1) != ext4_es_status(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* we need to check that a delayed extent is without unwritten status */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}

static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}
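
/*
 * ES_AGGRESSIVE_TEST is a debugging aid: every extent about to be inserted
 * into the status tree is cross-checked against the on-disk extent tree
 * (or against the indirect block map for non-extent files), and any
 * mismatch is reported via pr_warn().
 */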
#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {

		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of the whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to lookup a block mapping because
	 * the 'Indirect' structure is defined in indirect.c.  So we couldn't
	 * access the direct/indirect tree from outside.  It is too dirty to
	 * define this function in the indirect.c file.
	 */

	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * an indirect-based file doesn't have them.
			 */
			BUG_ON(1);
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller takes i_data_sem locking.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif
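
/*
 * Insert @newes into the tree.  If it can be merged with the neighbour
 * found during the rbtree descent (see ext4_es_can_be_merged()), extend
 * that neighbour in place; otherwise allocate a fresh node and link it at
 * the insertion point.  Callers hold i_es_lock for writing.
 */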
static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG_ON(1);
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
		err = 0;

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}
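
/*
 * Illustrative call (added; the real call sites live elsewhere in ext4,
 * e.g. the block-mapping paths): after mapping a range, a caller records
 * it as written with
 *
 *	ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 *			      map->m_pblk, EXTENT_STATUS_WRITTEN);
 *
 * where EXTENT_STATUS_WRITTEN is one of the status flags from
 * extents_status.h.
 */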

/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}

/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 on found, 0 on not
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find extent in cache firstly */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		stats->es_stats_cache_hits++;
	} else {
		stats->es_stats_cache_misses++;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}
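
/*
 * Remove all extent status information covering [lblk, end] from the tree.
 * len1 and len2 below are the surviving head and tail of the first
 * overlapped extent: if both are non-zero the extent must be split in two,
 * if only one is non-zero the extent is trimmed, and extents that are fully
 * covered by the range are erased and freed.
 */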
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err;

retry:
	err = 0;
	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				if ((err == -ENOMEM) &&
				    __es_shrink(EXT4_SB(inode->i_sb),
						128, EXT4_I(inode)))
					goto retry;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		goto out;
	}

	if (len1 > 0) {
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out:
	return err;
}

/*
 * ext4_es_remove_extent() removes a space from an extent status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	return err;
}
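
/*
 * Core of the shrinker: walk the superblock's inode list round-robin and
 * reclaim non-delayed extents until nr_to_scan objects have been freed.
 * Precached inodes are skipped on the first pass and shrunk only as a last
 * resort; scan time and shrunk counts are folded into exponentially
 * weighted moving averages for the statistics file.
 */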
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		int shrunk;

		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move(&ei->i_es_list, sbi->s_es_list.prev);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock which protects us from inode reclaim
		 * freeing inode under us
		 */
		spin_unlock(&sbi->s_es_lock);

		shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
		write_unlock(&ei->i_es_lock);

		nr_shrunk += shrunk;
		nr_to_scan -= shrunk;

		if (nr_to_scan == 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time*3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk*3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}

static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	if (!nr_to_scan)
		return ret;

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}
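
/*
 * The seq_file operations below back the "es_shrinker_info" file created
 * under sbi->s_proc in ext4_es_register_shrinker().  The file is a one-shot
 * snapshot: only the SEQ_START_TOKEN pass produces output, so the _next()
 * callback always ends the iteration.
 */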
static void *ext4_es_seq_shrinker_info_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? NULL : SEQ_START_TOKEN;
}

static void *
ext4_es_seq_shrinker_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return NULL;
}

static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = seq->private;
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find an inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, "  %lu/%lu cache hits/misses\n",
		   es_stats->es_stats_cache_hits,
		   es_stats->es_stats_cache_misses);
	if (inode_cnt)
		seq_printf(seq, "  %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n  %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
		    "maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
		    "  %llu us max scan time\n",
		    max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
		    div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}

static void ext4_es_seq_shrinker_info_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations ext4_es_seq_shrinker_info_ops = {
	.start = ext4_es_seq_shrinker_info_start,
	.next  = ext4_es_seq_shrinker_info_next,
	.stop  = ext4_es_seq_shrinker_info_stop,
	.show  = ext4_es_seq_shrinker_info_show,
};

static int
ext4_es_seq_shrinker_info_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &ext4_es_seq_shrinker_info_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = PDE_DATA(inode);
	}

	return ret;
}

static int
ext4_es_seq_shrinker_info_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

static const struct file_operations ext4_es_seq_shrinker_info_fops = {
	.owner		= THIS_MODULE,
	.open		= ext4_es_seq_shrinker_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= ext4_es_seq_shrinker_info_release,
};

int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	sbi->s_es_stats.es_stats_cache_hits = 0;
	sbi->s_es_stats.es_stats_cache_misses = 0;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
	if (err)
		goto err1;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker);
	if (err)
		goto err2;

	if (sbi->s_proc)
		proc_create_data("es_shrinker_info", S_IRUGO, sbi->s_proc,
				 &ext4_es_seq_shrinker_info_fops, sbi);

	return 0;

err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	return err;
}

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	if (sbi->s_proc)
		remove_proc_entry("es_shrinker_info", sbi->s_proc);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}
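
/*
 * Reclaim up to nr_to_scan non-delayed extents from a single inode's tree.
 * Called with ei->i_es_lock held for writing; returns the number of
 * objects freed.
 */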
static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
				       int nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	unsigned long nr_shrunk = 0;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

	node = rb_first(&tree->root);
	while (node != NULL) {
		es = rb_entry(node, struct extent_status, rb_node);
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need to
		 * use them.
		 */
		if (!ext4_es_is_delayed(es)) {
			rb_erase(&es->rb_node, &tree->root);
			ext4_es_free_extent(inode, es);
			nr_shrunk++;
			if (--nr_to_scan == 0)
				break;
		}
	}
	tree->cache_es = NULL;
	return nr_shrunk;
}