/*
 * fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/rbtree.h>
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"
#include "extents_status.h"

#include <trace/events/ext4.h>
/*
 * According to previous discussion in Ext4 Developer Workshop, we
 * will introduce a new structure called io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. Reservation space warning), and provide extent-level locking.
 * Delay extent tree is the first step to achieve this goal.  It was
 * originally built by Yongqiang Yang.  At that time it was called delay
 * extent tree, whose goal was only to track delayed extents in memory to
 * simplify the implementation of fiemap and bigalloc, and introduce
 * lseek SEEK_DATA/SEEK_HOLE support.  That is why it is still called
 * delay extent tree at the first commit.  But to better reflect what
 * it does, it has been renamed to extent status tree.
 *
 * Step1:
 * Currently the first step has been done.  All delayed extents are
 * tracked in the tree.  It maintains the delayed extent when a delayed
 * allocation is issued, and the delayed extent is written out or
 * invalidated.  Therefore the implementations of fiemap and bigalloc
 * are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of extent
 * status tree and future works.
 *
 * Step2:
 * In this step all extent status are tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree.  Hence, the single extent cache can be
 * removed because the extent status tree can do a better job.  Extents
 * in the status tree are loaded on-demand.  Therefore, the extent status
 * tree may not contain all of the extents in a file.  Meanwhile we define
 * a shrinker to reclaim memory from the extent status tree because a
 * fragmented extent tree will make the status tree cost too much memory.
 * Written/unwritten/hole extents in the tree will be reclaimed by this
 * shrinker when we are under high memory pressure.  Delayed extents will
 * not be reclaimed because fiemap, bigalloc, and seek_data/hole need them.
 */
/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement extent status tree?
 *
 * Without extent status tree, ext4 identifies a delayed extent by looking
 * up page cache, and this has several deficiencies - complicated, buggy,
 * and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
 * block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without extent status tree.
 *
 *   -- FIEMAP
 *	FIEMAP looks up page cache to identify delayed allocations from holes.
 *
 *   -- SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 *   -- bigalloc
 *	bigalloc looks up page cache to figure out if a block is
 *	already under delayed allocation or not to determine whether
 *	quota reserving is needed for the cluster.
 *
 *   -- writeout
 *	Writeout looks up whole page cache to see if a buffer is
 *	mapped.  If there are not very many delayed buffers, then it is
 *	time consuming.
 *
 * With extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out if a block or a range of
 * blocks is under delayed allocation (belongs to a delayed extent) or
 * not by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 *   -- extent
 *	An extent is a range of blocks which are contiguous logically and
 *	physically.  Unlike the extent in the extent tree, this extent in
 *	ext4 is an in-memory struct; there is no corresponding on-disk data.
 *	There is no limit on the length of an extent, so an extent can
 *	contain as many blocks as are contiguous logically and physically.
 *
 *   -- extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with different status.  The extents in the
 *	tree are ordered by logical block no.
 *
 *   -- operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks), and
 *	removing an extent (see the usage sketch after this comment).
 *
 *   -- race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 *   -- memory consumption
 *	A fragmented extent tree will make the extent status tree cost too
 *	much memory.  Hence, we will reclaim written/unwritten/hole extents
 *	from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 *   -- overhead
 *	1. There is a cached extent for write access, so if writes are
 *	not very random, adding space operations are O(1) in time.
 *
 *   -- gain
 *	2. Code is much simpler, more readable, more maintainable and
 *	simpler to debug.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   -- Refactor delayed space reservation
 *
 *   -- Extent-level locking
 */
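/*
 * Usage sketch for the three operations above.  This is an illustrative
 * example only: the block numbers are made up, error handling is elided,
 * and ES_USAGE_SKETCH is an ad-hoc guard that keeps the sketch compiled
 * out; it is not a path taken by the kernel itself.
 */
#ifdef ES_USAGE_SKETCH
static void es_usage_sketch(struct inode *inode)
{
	struct extent_status es;

	/* Record that logical blocks [100, 104) map to physical block 500. */
	ext4_es_insert_extent(inode, 100, 4, 500, EXTENT_STATUS_WRITTEN);

	/* A later lookup of block 102 is answered from the tree. */
	if (ext4_es_lookup_extent(inode, 102, &es))
		es_debug("found [%u/%u)\n", es.es_lblk, es.es_len);

	/* Drop the range once the mapping is invalidated. */
	ext4_es_remove_extent(inode, 100, 4);
}
#endif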
static struct kmem_cache *ext4_es_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);
int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}
void ext4_exit_es(void)
{
	kmem_cache_destroy(ext4_es_cachep);
}
void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}
#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;

		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif
static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}
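/*
 * For example, an extent with es_lblk == 10 and es_len == 4 covers
 * blocks 10..13, so ext4_es_end() returns 13; extents are treated as
 * inclusive [es_lblk, ext4_es_end(es)] intervals throughout this file.
 */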
/*
 * Search through the tree for a delayed extent with a given offset.  If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}
/*
 * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
 * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
 *
 * @inode: the inode which owns delayed extents
 * @lblk: the offset where we start to search
 * @end: the offset where we stop to search
 * @es: delayed extent that we found
 */
void ext4_es_find_delayed_extent_range(struct inode *inode,
				 ext4_lblk_t lblk, ext4_lblk_t end,
				 struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	BUG_ON(es == NULL);
	BUG_ON(end < lblk);
	trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;

	/* find extent in cache firstly */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %x\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !ext4_es_is_delayed(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (ext4_es_is_delayed(es1))
				break;
		}
	}

	if (es1 && ext4_es_is_delayed(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_delayed_extent_range_exit(inode, es);
}
static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}
static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}
static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;

	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim them.
	 */
	if (!ext4_es_is_delayed(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	return es;
}
static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}
/*
 * Check whether or not two extents can be merged.
 * Condition:
 *  - logical block number is contiguous
 *  - physical block number is contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_type(es1) != ext4_es_type(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* we need to check that a delayed extent is without unwritten status */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}
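/*
 * Worked example of the rules in ext4_es_can_be_merged() above
 * (illustrative numbers): a written extent [0/3) at physical block 100
 * and a written extent [3/2) at physical block 103 are contiguous in
 * both spaces and share a status, so they merge into [0/5) at block
 * 100.  A delayed extent and a written extent never merge because
 * their types differ.
 */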
static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		if (ext4_es_is_referenced(es))
			ext4_es_set_referenced(es1);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}
static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		if (ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es);
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}
#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {

		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of a whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
}
static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping because
	 * the 'Indirect' structure is defined in indirect.c.  So we couldn't
	 * access the direct/indirect tree from outside.  It is too dirty to
	 * define this function in the indirect.c file.
	 */

	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extent because
			 * indirect-based file doesn't have it.
			 */
			BUG_ON(1);
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller takes i_data_sem locking.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif
static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG_ON(1);
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}
/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
		err = 0;

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}
/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}
/*
 * ext4_es_lookup_extent() looks up an extent in extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find extent in cache firstly */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		stats->es_stats_cache_hits++;
	} else {
		stats->es_stats_cache_misses++;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err;

retry:
	err = 0;
	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				if ((err == -ENOMEM) &&
				    __es_shrink(EXT4_SB(inode->i_sb),
							128, EXT4_I(inode)))
					goto retry;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		goto out;
	}

	if (len1 > 0) {
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out:
	return err;
}
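/*
 * Worked example of the split logic in __es_remove_extent() above
 * (illustrative numbers): removing blocks [5, 8] from a written extent
 * [0/12) at physical block 100 gives len1 == 5 and len2 == 3, so the
 * existing node is trimmed to [0/5) at block 100 and a new node [9/3)
 * at block 109 is inserted for the tail.
 */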
/*
 * ext4_es_remove_extent() removes a space from an extent status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	return err;
}
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move_tail(&ei->i_es_list, &sbi->s_es_list);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock which protects us from inode reclaim
		 * freeing inode under us
		 */
		spin_unlock(&sbi->s_es_lock);

		nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
		write_unlock(&ei->i_es_lock);

		if (nr_to_scan <= 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	/* the stats below are 1:3 decaying averages: new = (sample + 3*old)/4 */
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time*3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk*3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}
static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}
static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	if (!nr_to_scan)
		return ret;

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}
static void *ext4_es_seq_shrinker_info_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? NULL : SEQ_START_TOKEN;
}

static void *
ext4_es_seq_shrinker_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return NULL;
}
static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = seq->private;
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find an inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, "  %lu/%lu cache hits/misses\n",
		   es_stats->es_stats_cache_hits,
		   es_stats->es_stats_cache_misses);
	if (inode_cnt)
		seq_printf(seq, "  %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n  %llu us scan time\n",
	    div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
		    "maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
		    "  %llu us max scan time\n",
		    max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
		    div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}
static void ext4_es_seq_shrinker_info_stop(struct seq_file *seq, void *v)
{
}
static const struct seq_operations ext4_es_seq_shrinker_info_ops = {
	.start = ext4_es_seq_shrinker_info_start,
	.next  = ext4_es_seq_shrinker_info_next,
	.stop  = ext4_es_seq_shrinker_info_stop,
	.show  = ext4_es_seq_shrinker_info_show,
};
static int
ext4_es_seq_shrinker_info_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &ext4_es_seq_shrinker_info_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = PDE_DATA(inode);
	}

	return ret;
}
static int
ext4_es_seq_shrinker_info_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}
static const struct file_operations ext4_es_seq_shrinker_info_fops = {
	.owner		= THIS_MODULE,
	.open		= ext4_es_seq_shrinker_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= ext4_es_seq_shrinker_info_release,
};
int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	/* Make sure we have enough bits for physical block number */
	BUILD_BUG_ON(ES_SHIFT < 48);
	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	sbi->s_es_stats.es_stats_cache_hits = 0;
	sbi->s_es_stats.es_stats_cache_misses = 0;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
	if (err)
		goto err1;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker);
	if (err)
		goto err2;

	if (sbi->s_proc)
		proc_create_data("es_shrinker_info", S_IRUGO, sbi->s_proc,
				 &ext4_es_seq_shrinker_info_fops, sbi);

	return 0;

err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	return err;
}
void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	if (sbi->s_proc)
		remove_proc_entry("es_shrinker_info", sbi->s_proc);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}
/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
 * Increment *nr_shrunk by the number of reclaimed extents. Also update
 * ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
				 int *nr_to_scan, int *nr_shrunk)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct extent_status *es;
	struct rb_node *node;

	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
	if (!es)
		goto out_wrap;
	node = &es->rb_node;
	while (*nr_to_scan > 0) {
		if (es->es_lblk > end) {
			ei->i_es_shrink_lblk = end + 1;
			return 0;
		}

		(*nr_to_scan)--;
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need to use
		 * them.
		 */
		if (ext4_es_is_delayed(es))
			goto next;
		if (ext4_es_is_referenced(es)) {
			ext4_es_clear_referenced(es);
			goto next;
		}

		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		(*nr_shrunk)++;
next:
		if (!node)
			goto out_wrap;
		es = rb_entry(node, struct extent_status, rb_node);
	}
	ei->i_es_shrink_lblk = es->es_lblk;
	return 1;
out_wrap:
	ei->i_es_shrink_lblk = 0;
	return 0;
}
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	int nr_shrunk = 0;
	ext4_lblk_t start = ei->i_es_shrink_lblk;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

	ei->i_es_tree.cache_es = NULL;
	return nr_shrunk;
}