/*
 * linux/fs/ext4/indirect.c
 *
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 * (sct@redhat.com), 1993, 1998
 */

#include "ext4_jbd2.h"
#include "ext4_extents.h"	/* Needed for EXT_MAX_BLOCKS */

#include <trace/events/ext4.h>
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;
static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of file's data ext4 uses a data structure common
 *	for UNIX filesystems - tree of pointers anchored in the inode, with
 *	data blocks at leaves and indirect blocks in intermediate nodes.
 *	This function translates the block number into path in that tree -
 *	return value is the path length and @offsets[n] is the offset of
 *	pointer to (n+1)th node in the nth one. If @block is out of range
 *	(negative or too large) warning is printed and zero returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed. All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 *
 *	Portability note: the last comparison (check that we fit into triple
 *	indirect block) is spelled differently, because otherwise on an
 *	architecture with 32-bit longs and 8Kb pages we might get into trouble
 *	if our filesystem had 8Kb blocks. We might use long long, but that would
 *	kill us on x86. Oh, well, at least the sign propagation does not matter -
 *	i_block would have to be negative in the very beginning, so we would not
 *	get there at all.
 */
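/*
 * Worked example (illustrative, not from the original source; assumes a
 * 4KB block size, so ptrs == 1024): the direct, indirect and double
 * indirect regions then cover logical blocks [0..11], [12..1035] and
 * [1036..1049611].  For i_block = 5000 we get 5000 - 12 - 1024 = 3964,
 * which is < 1024 * 1024, so the block lives under the double indirect
 * block and the depth-3 path is
 *	offsets = { EXT4_DIND_BLOCK, 3964 >> 10 = 3, 3964 & 1023 = 892 }
 */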
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, addresses they were taken from (and where we can
 *	verify that chain did not change) and buffer_heads hosting these
 *	numbers.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all way to the data (returns %NULL, *err == 0).
 *
 *      Need to be called with
 *      down_read(&EXT4_I(inode)->i_data_sem)
 */
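/*
 * Illustrative call sequence (a sketch, not kernel API documentation):
 * a plain lookup of logical block lblk boils down to
 *
 *	depth = ext4_block_to_path(inode, lblk, offsets, &boundary);
 *	partial = ext4_get_branch(inode, depth, offsets, chain, &err);
 *	if (!partial)
 *		pblk = le32_to_cpu(chain[depth - 1].key);  // fully mapped
 *	else
 *		... allocate from the triple *partial onwards ...
 */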
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			*err = -ENOMEM;
			goto no_block;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}
/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 *	In the latter case we colour the starting block by the callers PID to
 *	prevent it from clashing with concurrent allocations for a different inode
 *	in the same block group.  The PID is used here so that functionally related
 *	files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}
/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@block:  block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block allocation,
 *	returns it.
 *	Because this is only used for non-extent files, we limit the block nr
 *	to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}
/**
 *	ext4_blks_to_allocate - Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks need for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary: the offset in the indirect block
 *
 *	returns the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case, [t,d]Indirect block(s) has not allocated yet
	 * then it's clear blocks on that path have not allocated
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
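/*
 * Worked example (illustrative): with k == 0 (all indirect blocks already
 * present), blks == 8 and blocks_to_boundary == 5, the loop above counts
 * the run of zero pointers following branch[0].p; if they are all
 * unallocated it returns 6 (up to and including the boundary block), so a
 * single allocation never crosses an indirect-block boundary.
 */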
/**
 *	ext4_alloc_blocks: allocate the multiple blocks needed for a branch
 *	@handle: handle for this transaction
 *	@inode: inode which needs allocated blocks
 *	@iblock: the logical block to start allocated at
 *	@goal: preferred physical block of allocation
 *	@indirect_blks: the number of blocks need to allocate for indirect
 *			blocks
 *	@blks: number of desired blocks
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks(if needed) and the first direct block,
 *	@err: on return it will store the error code
 *
 *	This function will return the number of blocks allocated as
 *	requested by the passed-in parameters.
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;
	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks(if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks need to allocate(required)
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode, goal,
						     0, &count, err);
		if (*err)
			goto failed_out;

		if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
			EXT4_ERROR_INODE(inode,
					 "current_block %llu + count %lu > %d!",
					 current_block, count,
					 EXT4_MAX_BLOCK_FILE_PHYS);
			*err = -EIO;
			goto failed_out;
		}

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			WARN(1, KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);
	if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
		EXT4_ERROR_INODE(inode,
				 "current_block %llu + ar.len %d > %d!",
				 current_block, ar.len,
				 EXT4_MAX_BLOCK_FILE_PHYS);
		*err = -EIO;
		goto failed_out;
	}

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}

	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
	return ret;
}
/**
 *	ext4_alloc_branch - allocate and set up a chain of blocks.
 *	@handle: handle for this transaction
 *	@indirect_blks: number of allocated indirect blocks
 *	@blks: number of allocated direct blocks
 *	@goal: preferred place for allocation
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
 *	the same format as ext4_get_branch() would do. We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key). Upon the exit we have the same
 *	picture as after the successful ext4_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */
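/*
 * Sketch of the resulting state (illustrative, not from the original
 * source): after a successful return the new blocks form a complete
 * subtree that is still invisible to readers, e.g. for a depth-2
 * allocation
 *
 *	inode.i_data[x] --(link not yet set)--> [new indirect blk] --> [data]
 *
 * and only ext4_splice_branch() later stores branch->key into *branch->p.
 */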
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;
	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks;  n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}

		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			/* Don't brelse(bh) here; it's done in
			 * ext4_journal_forget() below */
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
	for (i = 1; i <= n ; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	for (i = n+1; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);

	return err;
}
/**
 *	ext4_splice_branch - splice the allocated branch onto inode.
 *	@handle: handle for this transaction
 *	@block: (logical) number of block we are adding
 *	@chain: chain of indirect blocks (with a missing link - see
 *		ext4_alloc_branch)
 *	@where: location of missing link
 *	@num:   number of indirect blocks we are adding
 *	@blks:  number of direct blocks we are adding
 *
 *	This function fills the missing link and does all housekeeping needed in
 *	inode (->i_blocks, etc.). In case of success we end up with the full
 *	chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}
/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_get_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
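/*
 * Illustrative use (a sketch; callers normally go through
 * ext4_map_blocks()): to map 8 blocks starting at logical block 100,
 *
 *	struct ext4_map_blocks map = { .m_lblk = 100, .m_len = 8 };
 *	ret = ext4_ind_map_blocks(handle, inode, &map,
 *				  EXT4_GET_BLOCKS_CREATE);
 *
 * on success ret is the number of blocks mapped, map.m_pblk holds the
 * first physical block, and EXT4_MAP_NEW is set if an allocation happened.
 */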
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks? */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;
	/*
	 * Okay, we need to do block allocation.
	 */
	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		return -EUCLEAN;
	}

	goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      map->m_len, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);
	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, map->m_lblk,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, map, err);
	return err;
}
/*
 * O_DIRECT for ext3 (or indirect map) based files
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file. But current
 * VFS code falls back into buffered path in that case so we are safe.
 */
ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
			   const struct iovec *iov, loff_t offset,
			   unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);
	int retries = 0;

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext4_journal_stop(handle);
		}
	}
retry:
	if (rw == READ && ext4_should_dioread_nolock(inode)) {
		if (unlikely(atomic_read(&EXT4_I(inode)->i_unwritten))) {
			mutex_lock(&inode->i_mutex);
			ext4_flush_unwritten_io(inode);
			mutex_unlock(&inode->i_mutex);
		}
		/*
		 * Nolock dioread optimization may be dynamically disabled
		 * via ext4_inode_block_unlocked_dio(). Check inode's state
		 * while holding extra i_dio_count ref.
		 */
		atomic_inc(&inode->i_dio_count);
		smp_mb();
		if (unlikely(ext4_test_inode_state(inode,
						   EXT4_STATE_DIOREAD_LOCK))) {
			inode_dio_done(inode);
			goto locked;
		}
		ret = __blockdev_direct_IO(rw, iocb, inode,
					   inode->i_sb->s_bdev, iov,
					   offset, nr_segs,
					   ext4_get_block, NULL, NULL, 0);
		inode_dio_done(inode);
	} else {
locked:
		ret = blockdev_direct_IO(rw, iocb, inode, iov,
					 offset, nr_segs, ext4_get_block);

		if (unlikely((rw & WRITE) && ret < 0)) {
			loff_t isize = i_size_read(inode);
			loff_t end = offset + iov_length(iov, nr_segs);

			if (end > isize)
				ext4_truncate_failed_write(inode);
		}
	}
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Bail out and pretend
			 * the write failed... */
			ret = PTR_ERR(handle);
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);

			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace.  So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
/*
 * Calculate the number of metadata blocks needed to reserve
 * to allocate a new block at @lblock for a non-extent based file
 */
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
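/*
 * Worked example (illustrative, 4KB blocks so EXT4_ADDR_PER_BLOCK_BITS
 * == 10): for lblock == EXT4_NDIR_BLOCKS + 1 we get lblock - 12 == 1,
 * order_base_2(1) == 0, so we reserve 0/10 + 1 == 1 indirect block; for a
 * logical block around 2000000 (past the double indirect range),
 * order_base_2 gives 21 and 21/10 + 1 == 3, i.e. indirect, dindirect and
 * tindirect blocks may all be needed.
 */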
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int indirects;

	/* if nrblocks are contiguous */
	if (chunk) {
		/*
		 * With N contiguous data blocks, we need at most
		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
		 * 2 dindirect blocks, and 1 tindirect block
		 */
		return DIV_ROUND_UP(nrblocks,
				    EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
	}
	/*
	 * if nrblocks are not contiguous, worst case, each block touches
	 * an indirect block, and each indirect block touches a double
	 * indirect block, plus a triple indirect block
	 */
	indirects = nrblocks * 2 + 1;
	return indirects;
}
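/*
 * Worked example (illustrative): for a contiguous chunk of 1000 blocks
 * with 4KB block size, DIV_ROUND_UP(1000, 1024) + 4 == 5 metadata blocks
 * are budgeted; for 1000 non-contiguous blocks the worst case is
 * 1000 * 2 + 1 == 2001.
 */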
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
				    ext4_blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}
/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
		return 0;
	return 1;
}
/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
/**
 *	ext4_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to the (detached) top of branch
 *
 *	This is a helper function used by ext4_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive. Block is
 *	partially truncated if some data below the new i_size is referred
 *	from it (and it is on the path to the first completely truncated
 *	data block, indeed).  We have to free the top of that path along
 *	with everything to the right of the path. Since no allocation
 *	past the truncation point is possible until ext4_truncate()
 *	finishes, we may safely do the latter, but top of branch may
 *	require special attention - pageout below the truncation point
 *	might try to populate it.
 *
 *	We atomically detach the top of branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks - in @chain[].bh and pointers to
 *	their last elements that should not be removed - in
 *	@chain[].p. Return value is the pointer to last filled element
 *	of @chain.
 *
 *	The work left to caller to do the actual freeing of subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].
 *			(no partially truncated stuff there).  */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Return 0 on success, 1 on invalid block range
 * and < 0 on fatal error.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int	flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
	int	err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
						  ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}
/**
 * ext4_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * blocks.
 */
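/*
 * Illustrative run accumulation (not from the original source): a pointer
 * array {100, 101, 102, 200, 0, 201} produces one ext4_clear_blocks()
 * call for the run 100..102 when 200 breaks contiguity; the zero entry (a
 * hole) is simply skipped, so 201 extends the run started at 200, and the
 * post-loop call frees 200..201 - this is why (last - first) can exceed
 * `count'.
 */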
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	   /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	   /* Pointer into inode/ind
					      corresponding to
					      block_to_free */
	ext4_fsblk_t nr;		   /* Current block # */
	__le32 *p;			   /* Pointer into inode/ind
					      for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}
/**
 *	ext4_free_branches - free an array of branches
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@parent_bh: the buffer_head which contains *@first and *@last
 *	@first:	array of block numbers
 *	@last:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					   (__le32 *) bh->b_data,
					   (__le32 *) bh->b_data + addr_per_block,
					   depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block.  This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)) {
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
void ext4_ind_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	loff_t page_len;
	unsigned blocksize = inode->i_sb->s_blocksize;
	int err;

	handle = start_transaction(inode);
	if (IS_ERR(handle))
		return;		/* AKPM: return what? */
	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (inode->i_size % PAGE_CACHE_SIZE != 0) {
		page_len = PAGE_CACHE_SIZE -
			(inode->i_size & (PAGE_CACHE_SIZE - 1));

		err = ext4_discard_partial_page_buffers(handle,
			mapping, inode->i_size, page_len, 0);

		if (err)
			goto out_stop;
	}

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			goto out_stop;	/* error */
	}
	/*
	 * OK.  This truncate is going to happen.  We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers.  It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * From here we block out all ext4_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	down_write(&ei->i_data_sem);

	ext4_discard_preallocations(inode);
	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;
	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		goto out_unlock;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p,
					   partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
	case EXT4_TIND_BLOCK:
		;
	}
out_unlock:
	up_write(&ei->i_data_sem);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/*
	 * In a multi-transaction truncate, we only make the final transaction
	 * synchronous
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	ext4_journal_stop(handle);
	trace_ext4_truncate_exit(inode);
}
static int free_hole_blocks(handle_t *handle, struct inode *inode,
			    struct buffer_head *parent_bh, __le32 *i_data,
			    int level, ext4_lblk_t first,
			    ext4_lblk_t count, int max)
{
	struct buffer_head *bh = NULL;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ret = 0;
	int i, inc;
	ext4_lblk_t offset;
	__le32 blk;

	inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
	for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
		if (offset >= count + first)
			break;
		if (*i_data == 0 || (offset + inc) <= first)
			continue;
		blk = *i_data;
		if (level > 0) {
			ext4_lblk_t first2;
			bh = sb_bread(inode->i_sb, blk);
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, blk,
						       "Read failure");
				return -EIO;
			}
			first2 = (first > offset) ? first - offset : 0;
			ret = free_hole_blocks(handle, inode, bh,
					       (__le32 *)bh->b_data, level - 1,
					       first2, count - offset,
					       inode->i_sb->s_blocksize >> 2);
			if (ret) {
				brelse(bh);
				goto err;
			}
		}
		if (level == 0 ||
		    (bh && all_zeroes((__le32 *)bh->b_data,
				      (__le32 *)bh->b_data + addr_per_block))) {
			ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
			*i_data = 0;
		}
		brelse(bh);
		bh = NULL;
	}

err:
	return ret;
}
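/*
 * Note on the inc computation above (illustrative, 4KB blocks): each
 * pointer at `level' covers 1 << ((blkbits - 2) * level) logical blocks,
 * i.e. 1 at level 0, 1024 at level 1 and 1024 * 1024 at level 2, since
 * every indirect block holds blocksize / 4 == 1024 pointers.
 */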
static int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
				 ext4_lblk_t first, ext4_lblk_t stop)
{
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int level, ret = 0;
	int num = EXT4_NDIR_BLOCKS;
	ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
	__le32 *i_data = EXT4_I(inode)->i_data;

	count = stop - first;
	for (level = 0; level < 4; level++, max *= addr_per_block) {
		if (first < max) {
			ret = free_hole_blocks(handle, inode, NULL, i_data,
					       level, first, count, num);
			if (ret)
				goto err;
			if (count > max - first)
				count -= max - first;
			else
				break;
			first = 0;
		} else {
			first -= max;
		}
		i_data += num;
		if (level == 0) {
			num = 1;
			max = 1;
		}
	}

err:
	return ret;
}
int ext4_ind_punch_hole(struct file *file, loff_t offset, loff_t length)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t first_block, stop_block;
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle = NULL;
	loff_t first_page, last_page, page_len;
	loff_t first_page_offset, last_page_offset;
	int err = 0;
	/*
	 * Write out all dirty pages to avoid race conditions
	 * Then release them.
	 */
	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		err = filemap_write_and_wait_range(mapping,
			offset, offset + length - 1);
		if (err)
			return err;
	}

	mutex_lock(&inode->i_mutex);
	/* It's not possible to punch a hole on an append-only file */
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
		err = -EPERM;
		goto out_mutex;
	}
	if (IS_SWAPFILE(inode)) {
		err = -ETXTBSY;
		goto out_mutex;
	}

	/* No need to punch hole beyond i_size */
	if (offset >= inode->i_size)
		goto out_mutex;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
		   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
		   offset;
	}

	first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	last_page = (offset + length) >> PAGE_CACHE_SHIFT;

	first_page_offset = first_page << PAGE_CACHE_SHIFT;
	last_page_offset = last_page << PAGE_CACHE_SHIFT;

	/* Now release the pages */
	if (last_page_offset > first_page_offset) {
		truncate_pagecache_range(inode, first_page_offset,
					 last_page_offset - 1);
	}

	/* Wait all existing dio works, newcomers will block on i_mutex */
	inode_dio_wait(inode);

	handle = start_transaction(inode);
	if (IS_ERR(handle))
		goto out_mutex;
	/*
	 * Now we need to zero out the non-page-aligned data in the
	 * pages at the start and tail of the hole, and unmap the buffer
	 * heads for the block aligned regions of the page that were
	 * completely zeroed.
	 */
	if (first_page > last_page) {
		/*
		 * If the file space being truncated is contained within a page
		 * just zero out and unmap the middle of that page
		 */
		err = ext4_discard_partial_page_buffers(handle,
			mapping, offset, length, 0);
		if (err)
			goto out;
	} else {
		/*
		 * Zero out and unmap the partial page that contains
		 * the start of the hole
		 */
		page_len = first_page_offset - offset;
		if (page_len > 0) {
			err = ext4_discard_partial_page_buffers(handle, mapping,
							offset, page_len, 0);
			if (err)
				goto out;
		}

		/*
		 * Zero out and unmap the partial page that contains
		 * the end of the hole
		 */
		page_len = offset + length - last_page_offset;
		if (page_len > 0) {
			err = ext4_discard_partial_page_buffers(handle, mapping,
						last_page_offset, page_len, 0);
			if (err)
				goto out;
		}
	}

	/*
	 * If i_size is contained in the last page, we need to
	 * unmap and zero the partial page after i_size
	 */
	if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
	    inode->i_size % PAGE_CACHE_SIZE != 0) {
		page_len = PAGE_CACHE_SIZE -
			(inode->i_size & (PAGE_CACHE_SIZE - 1));
		if (page_len > 0) {
			err = ext4_discard_partial_page_buffers(handle,
				mapping, inode->i_size, page_len, 0);
			if (err)
				goto out;
		}
	}
= (offset
+ sb
->s_blocksize
- 1) >>
1731 EXT4_BLOCK_SIZE_BITS(sb
);
1732 stop_block
= (offset
+ length
) >> EXT4_BLOCK_SIZE_BITS(sb
);
1734 if (first_block
>= stop_block
)
1737 down_write(&EXT4_I(inode
)->i_data_sem
);
1738 ext4_discard_preallocations(inode
);
1740 err
= ext4_es_remove_extent(inode
, first_block
,
1741 stop_block
- first_block
);
1742 err
= ext4_free_hole_blocks(handle
, inode
, first_block
, stop_block
);
1744 ext4_discard_preallocations(inode
);
1747 ext4_handle_sync(handle
);
1749 up_write(&EXT4_I(inode
)->i_data_sem
);
1752 inode
->i_mtime
= inode
->i_ctime
= ext4_current_time(inode
);
1753 ext4_mark_inode_dirty(handle
, inode
);
1754 ext4_journal_stop(handle
);
1757 mutex_unlock(&inode
->i_mutex
);