/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
/* Kernel only BMAP related definitions and functions */
/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because
 * the bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
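
/*
 * Note: the realtime device is addressed as a single linear run of blocks,
 * so a realtime block number converts with a plain FSB-to-BB scaling.  Data
 * device block numbers encode an AG number plus an AG-relative block, which
 * is why they must go through XFS_FSB_TO_DADDR() instead.
 */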
/*
 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
 * caller.  Frees all the extents that need freeing, which must be done
 * last due to locking considerations.  We never free any extents in
 * the first transaction.
 *
 * Return 1 if the given transaction was committed and a new one
 * started, and 0 otherwise in the committed parameter.
 */
int						/* error */
xfs_bmap_finish(
	struct xfs_trans		**tp,		/* transaction pointer addr */
	struct xfs_bmap_free		*flist,		/* i/o: list extents to free */
	int				*committed)	/* xact committed or not */
{
	struct xfs_efd_log_item		*efd;		/* extent free data */
	struct xfs_efi_log_item		*efi;		/* extent free intention */
	int				error;		/* error return value */
	struct xfs_bmap_free_item	*free;		/* free extent item */
	struct xfs_bmap_free_item	*next;		/* next item on free list */

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	if (flist->xbf_count == 0) {
		*committed = 0;
		return 0;
	}

	efi = xfs_trans_get_efi(*tp, flist->xbf_count);
	for (free = flist->xbf_first; free; free = free->xbfi_next)
		xfs_trans_log_efi_extent(*tp, efi, free->xbfi_startblock,
			free->xbfi_blockcount);

	error = __xfs_trans_roll(tp, NULL, committed);
	if (error) {
		/*
		 * If the transaction was committed, drop the EFD reference
		 * since we're bailing out of here. The other reference is
		 * dropped when the EFI hits the AIL.
		 *
		 * If the transaction was not committed, the EFI is freed by the
		 * EFI item unlock handler on abort. Also, we have a new
		 * transaction so we should return committed=1 even though we're
		 * returning an error.
		 */
		if (*committed) {
			xfs_efi_release(efi);
			xfs_force_shutdown((*tp)->t_mountp,
				(error == -EFSCORRUPTED) ?
					SHUTDOWN_CORRUPT_INCORE :
					SHUTDOWN_META_IO_ERROR);
		} else {
			*committed = 1;
		}

		return error;
	}

	/*
	 * Get an EFD and free each extent in the list, logging to the EFD in
	 * the process. The remaining bmap free list is cleaned up by the
	 * caller on error.
	 */
	efd = xfs_trans_get_efd(*tp, efi, flist->xbf_count);
	for (free = flist->xbf_first; free != NULL; free = next) {
		next = free->xbfi_next;

		error = xfs_trans_free_extent(*tp, efd, free->xbfi_startblock,
					      free->xbfi_blockcount);
		if (error)
			return error;

		xfs_bmap_del_free(flist, NULL, free);
	}

	return 0;
}
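
/*
 * Illustrative caller pattern (a sketch only; see xfs_free_file_space() and
 * xfs_alloc_file_space() below for the real thing): extents are unmapped or
 * allocated into a permanent transaction, the deferred frees are completed
 * with xfs_bmap_finish(), and only then is the transaction committed:
 *
 *	xfs_bmap_init(&free_list, &firstfsb);
 *	error = xfs_bunmapi(tp, ip, ..., &firstfsb, &free_list, &done);
 *	if (!error)
 *		error = xfs_bmap_finish(&tp, &free_list, &committed);
 *	if (error)
 *		xfs_bmap_cancel(&free_list);
 *	else
 *		error = xfs_trans_commit(tp);
 */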
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_alloctype_t		atype = 0;	/* type for allocation routines */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* mount point structure */
	xfs_extlen_t		prod = 0;	/* product factor for allocators */
	xfs_extlen_t		ralen = 0;	/* realtime allocation length */
	xfs_extlen_t		align;		/* minimum allocation alignment */
	xfs_rtblock_t		rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out other modifications to the RT bitmap inode.
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, atype, ap->wasdel, prod, &rtb)))
		return error;
	if (rtb == NULLFSBLOCK && prod > 1 &&
	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
					   ap->length, &ralen, atype,
					   ap->wasdel, 1, &rtb)))
		return error;
	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
	} else {
		ap->length = 0;
	}
	return 0;
}
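
/*
 * Note: xfs_rtallocate_extent() works in units of realtime extents rather
 * than filesystem blocks, which is why ap->blkno is scaled down by
 * sb_rextsize (via do_div) before the call and the returned block and
 * length are scaled back up afterwards.
 */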
/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered
 * outside the end of file for an empty fork, so 1 is returned in *eof in
 * that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count)
{
	int			b;

	for (b = 0; b < numrecs; b++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
		*count += xfs_bmbt_get_blockcount(frp);
	}
}
/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count)
{
	int			b;
	xfs_bmbt_rec_t		*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}
/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int					/* error */
xfs_bmap_count_tree(
	xfs_mount_t		*mp,		/* file system mount point */
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_ifork_t		*ifp,		/* inode fork pointer */
	xfs_fsblock_t		blockno,	/* file system block number */
	int			levelin,	/* level in btree */
	int			*count)		/* Count of blocks */
{
	int			error;
	xfs_buf_t		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (unlikely((error =
		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}
/*
 * Count fsblocks of the given fork.
 */
int						/* error */
xfs_bmap_count_blocks(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode */
	int			whichfork,	/* data or attr fork */
	int			*count)		/* out: count of blocks */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
		xfs_bmap_count_leaves(ifp, 0,
			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
			count);
		return 0;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}

	return 0;
}
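
/*
 * Note: an extent-format fork is counted directly from the incore extent
 * list via xfs_bmap_count_leaves(); only a btree-format fork requires the
 * recursive on-disk walk done by xfs_bmap_count_tree() above.
 */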
/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	__int64_t		end,		/* last block requested */
	xfs_fsblock_t		startblock)
{
	__int64_t		fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}
/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	__int64_t		bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	__int64_t		fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			nexleft;	/* # of user extents left */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;
	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

	if (whichfork == XFS_ATTR_FORK) {
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return -EINVAL;
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return -EFSCORRUPTED;
		}

		prealloced = 0;
		fixlen = 1LL << 32;
	} else {
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
			return -EINVAL;

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
	}

	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return -EINVAL;
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return -EINVAL;
	bmvend = bmv->bmv_offset + bmv->bmv_length;

	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return -ENOMEM;
	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
	if (!out)
		return -ENOMEM;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	if (whichfork == XFS_DATA_FORK) {
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		lock = xfs_ilock_data_map_shared(ip);
	} else {
		lock = xfs_ilock_attr_map_shared(ip);
	}

	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = -ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;

	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}

	nexleft = nex;

	do {
		nmap = (nexleft > subnex) ? subnex : nexleft;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);

		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;

			/*
			 * delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time. These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}

			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
					prealloced, bmvend,
					map[i].br_startblock))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			nexleft--;
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		int full = 0;	/* user array is full */

		/* format results & advance arg */
		error = formatter(&arg, &out[i], &full);
		if (error || full)
			break;
	}

	kmem_free(out);
	return error;
}
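
/*
 * Illustrative formatter sketch (hypothetical, not code from this file): a
 * formatter typically copies one getbmapx record out to the caller's buffer
 * and sets *full when no more slots remain, which ends the loop above early.
 *
 *	static int
 *	example_getbmap_format(void **ap, struct getbmapx *bmv, int *full)
 *	{
 *		struct getbmapx __user **base = (struct getbmapx __user **)ap;
 *
 *		if (copy_to_user(*base, bmv, sizeof(*bmv)))
 *			return -EFAULT;
 *		(*base)++;
 *		return 0;
 *	}
 */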
/*
 * dead simple method of punching delayed allocation blocks from a range in
 * the inode. Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical. This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		xfs_bmap_free_t flist;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/flist pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_bmap_init(&flist, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&flist, &done);
		if (error)
			break;

		ASSERT(!flist.xbf_count && !flist.xbf_first);
next_block:
		start_fsb++;
		remaining--;
	} while(remaining > 0);

	return error;
}
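
/*
 * Note: passing a NULL transaction to xfs_bunmapi() is safe here only
 * because punching a delayed allocation never modifies on-disk metadata;
 * it just releases the incore reservation, so no log items are needed.
 */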
/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(ip->i_d.di_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}
/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	bool		need_iolock)
{
	xfs_trans_t	*tp;
	int		error;
	xfs_fileoff_t	end_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	map_len;
	int		nimaps;
	xfs_bmbt_irec_t	imap;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/*
		 * There are blocks after the end of file.
		 * Free them up now by truncating the file to
		 * its current size.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

		if (need_iolock) {
			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
				xfs_trans_cancel(tp);
				return -EAGAIN;
			}
		}

		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp);
			if (need_iolock)
				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					      XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (need_iolock)
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	}
	return error;
}
int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	xfs_bmap_free_t		free_list;
	uint			qblocks, resblks, resrtextents;
	int			committed;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, resrtextents);
		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					resblks, imapp, &nimaps, &free_list);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
 *
 * This function is used by xfs_free_file_space() to zero
 * partial blocks when the range to free is not block aligned.
 * When unreserving space with boundaries that are not block
 * aligned we round up the start and round down the end
 * boundaries and then use this function to zero the parts of
 * the blocks that got dropped during the rounding.
 */
STATIC int
xfs_zero_remaining_bytes(
	xfs_inode_t		*ip,
	xfs_off_t		startoff,
	xfs_off_t		endoff)
{
	xfs_buf_t		*bp;
	int			nimap;
	xfs_bmbt_irec_t		imap;
	xfs_fileoff_t		offset_fsb;
	xfs_off_t		lastoffset;
	xfs_off_t		offset;
	xfs_mount_t		*mp = ip->i_mount;
	int			error = 0;

	/*
	 * Avoid doing I/O beyond eof - it's not necessary
	 * since nothing can read beyond eof.  The space will
	 * be zeroed when the file is extended anyway.
	 */
	if (startoff >= XFS_ISIZE(ip))
		return 0;

	if (endoff > XFS_ISIZE(ip))
		endoff = XFS_ISIZE(ip);

	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
		uint lock_mode;

		offset_fsb = XFS_B_TO_FSBT(mp, offset);
		nimap = 1;

		lock_mode = xfs_ilock_data_map_shared(ip);
		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
		xfs_iunlock(ip, lock_mode);

		if (error || nimap < 1)
			break;
		ASSERT(imap.br_blockcount >= 1);
		ASSERT(imap.br_startoff == offset_fsb);
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);

		if (imap.br_startblock == HOLESTARTBLOCK ||
		    imap.br_state == XFS_EXT_UNWRITTEN) {
			/* skip the entire extent */
			lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff +
						      imap.br_blockcount) - 1;
			continue;
		}

		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
		if (lastoffset > endoff)
			lastoffset = endoff;

		/* DAX can just zero the backing device directly */
		if (IS_DAX(VFS_I(ip))) {
			error = dax_zero_page_range(VFS_I(ip), offset,
						    lastoffset - offset + 1,
						    xfs_get_blocks_direct);
			if (error)
				return error;
			continue;
		}

		error = xfs_buf_read_uncached(XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp,
				xfs_fsb_to_db(ip, imap.br_startblock),
				BTOBB(mp->m_sb.sb_blocksize),
				0, &bp, NULL);
		if (error)
			return error;

		memset(bp->b_addr +
				(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
		       0, lastoffset - offset + 1);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			return error;
	}
	return error;
}
int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			committed;
	int			done;
	xfs_fileoff_t		endoffset_fsb;
	int			error;
	xfs_fsblock_t		firstfsb;
	xfs_bmap_free_t		free_list;
	xfs_bmbt_irec_t		imap;
	xfs_off_t		ioffset;
	xfs_off_t		iendoffset;
	xfs_extlen_t		mod = 0;
	xfs_mount_t		*mp;
	int			nimap;
	uint			resblks;
	xfs_off_t		rounding;
	int			rt;
	xfs_fileoff_t		startoffset_fsb;
	xfs_trans_t		*tp;

	mp = ip->i_mount;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	error = 0;
	if (len <= 0)	/* if nothing being freed */
		return error;
	rt = XFS_IS_REALTIME_INODE(ip);
	startoffset_fsb	= XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(VFS_I(ip));

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
	ioffset = round_down(offset, rounding);
	iendoffset = round_up(offset + len, rounding) - 1;
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
					     iendoffset);
	if (error)
		goto out;
	truncate_pagecache_range(VFS_I(ip), ioffset, iendoffset);

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 * If it's a realtime file & can't use unwritten extents then we
	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
	 * will take care of it for us.
	 */
	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		nimap = 1;
		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			xfs_daddr_t	block;

			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			block = imap.br_startblock;
			mod = do_div(block, mp->m_sb.sb_rextsize);
			if (mod)
				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
		}
		nimap = 1;
		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			mod++;
			if (mod && (mod != mp->m_sb.sb_rextsize))
				endoffset_fsb -= mod;
		}
	}
	if ((done = (endoffset_fsb <= startoffset_fsb)))
		/*
		 * One contiguous piece to clear
		 */
		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
	else {
		/*
		 * Some full blocks, possibly two pieces to clear
		 */
		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
			error = xfs_zero_remaining_bytes(ip, offset,
				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
		if (!error &&
		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
			error = xfs_zero_remaining_bytes(ip,
				XFS_FSB_TO_B(mp, endoffset_fsb),
				offset + len - 1);
	}

	/*
	 * free file space until done or until there is an error
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	while (!error && !done) {

		/*
		 * allocate and setup the transaction. Allow this
		 * transaction to dip into the reserve blocks to ensure
		 * the freeing of the space succeeds at ENOSPC.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);

		/*
		 * check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp,
				ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
				resblks, 0, XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * issue the bunmapi() call to free the blocks
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bunmapi(tp, ip, startoffset_fsb,
				  endoffset_fsb - startoffset_fsb,
				  0, 2, &firstfsb, &free_list, &done);
		if (error)
			goto error0;

		/*
		 * complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

 out:
	return error;

 error0:
	xfs_bmap_cancel(&free_list);
 error1:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	goto out;
}
/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			blksize;
	int			error;

	trace_xfs_zero_file_space(ip);

	blksize = 1 << mp->m_sb.sb_blocklog;

	/*
	 * Punch a hole and prealloc the range. We use hole punch rather than
	 * unwritten extent conversion for two reasons:
	 *
	 * 1.) Hole punch handles partial block zeroing for us.
	 *
	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
	 * by virtue of the hole punch.
	 */
	error = xfs_free_file_space(ip, offset, len);
	if (error)
		goto out;

	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
				     round_up(offset + len, blksize) -
				     round_down(offset, blksize),
				     XFS_BMAPI_PREALLOC);
out:
	return error;
}
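
/*
 * Note: this is the backend for fallocate(FALLOC_FL_ZERO_RANGE) on XFS.
 * Because the hole punch happens first, the byte range reads back as zeroes
 * even if the preallocation step later fails with ENOSPC.
 */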
/*
 * @next_fsb will keep track of the extent currently undergoing shift.
 * @stop_fsb will keep track of the extent at which we have to stop.
 * If we are shifting left, we will start with block (offset + len) and
 * shift each extent till last extent.
 * If we are shifting right, we will start with last extent inside file space
 * and continue until we reach the block corresponding to offset.
 */
static int
xfs_shift_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	enum shift_direction	direction)
{
	int			done = 0;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	int			committed;
	xfs_fileoff_t		stop_fsb;
	xfs_fileoff_t		next_fsb;
	xfs_fileoff_t		shift_fsb;

	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);

	if (direction == SHIFT_LEFT) {
		next_fsb = XFS_B_TO_FSB(mp, offset + len);
		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
	} else {
		/*
		 * If right shift, delegate the work of initialization of
		 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
		 */
		next_fsb = NULLFSBLOCK;
		stop_fsb = XFS_B_TO_FSB(mp, offset);
	}

	shift_fsb = XFS_B_TO_FSB(mp, len);

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(mp, ip, false);
		if (error)
			return error;
	}

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
					     offset, -1);
	if (error)
		return error;
	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					offset >> PAGE_CACHE_SHIFT, -1);
	if (error)
		return error;

	/*
	 * The extent shifting code works on extent granularity. So, if
	 * stop_fsb is not the starting block of extent, we need to split
	 * the extent at stop_fsb.
	 */
	if (direction == SHIFT_RIGHT) {
		error = xfs_bmap_split_extent(ip, stop_fsb);
		if (error)
			return error;
	}

	while (!error && !done) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		/*
		 * We would need to reserve permanent block for transaction.
		 * This will come into picture when after shifting extent into
		 * hole we found that adjacent extents can be merged which
		 * may lead to freeing of a block during record update.
		 */
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
		if (error) {
			xfs_trans_cancel(tp);
			break;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_trans_cancel;

		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

		xfs_bmap_init(&free_list, &first_block);

		/*
		 * We are using the write transaction in which max 2 bmbt
		 * updates are allowed
		 */
		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb, &first_block, &free_list,
				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
		if (error)
			goto out_bmap_cancel;

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto out_bmap_cancel;

		error = xfs_trans_commit(tp);
	}

	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
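
/*
 * Note: the shift proceeds in batches of at most XFS_BMAP_MAX_SHIFT_EXTENTS
 * extents, with a fresh transaction allocated and committed for each batch,
 * until xfs_bmap_shift_extents() reports that it is done.
 */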
/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is to free data blocks in the specified range
 *	by calling xfs_free_file_space(). It would also sync dirty data
 *	and invalidate page cache over the region on which collapse range
 *	is working. Then we shift extent records to the left to cover the hole.
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int error;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
}
/*
 * xfs_insert_file_space()
 *	This routine creates a hole by shifting extents for the given file.
 *	The first thing we do is to sync dirty data and invalidate page cache
 *	over the region on which the insert range is working. Then we split
 *	the extent at the given offset by calling xfs_bmap_split_extent, and
 *	shift all extent records lying between [offset, last allocated extent]
 *	to the right to make room for the hole.
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	trace_xfs_insert_file_space(ip);

	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
}
/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip)	/* tmp inode */
{

	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * if the target inode has fewer extents than the temporary inode then
	 * why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return -EINVAL;

	/*
	 * if the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}
static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}
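
/*
 * Note: the extent swap below manipulates the data forks directly and never
 * goes through the page cache, so both inodes must be flushed and left with
 * an empty mapping first; a non-empty mapping after the flush (the
 * "Verify O_DIRECT for ftmp" check above) makes the file ineligible.
 */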
int
xfs_swap_extents(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip,	/* tmp inode */
	xfs_swapext_t	*sxp)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	xfs_bstat_t	*sbp = &sxp->sx_stat;
	xfs_ifork_t	*tempifp, *ifp, *tifp;
	int		src_log_flags, target_log_flags;
	int		error = 0;
	int		aforkblks = 0;
	int		taforkblks = 0;
	__uint64_t	tmp;
	int		lock_flags;

	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
	if (!tempifp) {
		error = -ENOMEM;
		goto out;
	}

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with. Then we can ensure the inodes are flushed and have no
	 * page cache safely. Once we have done this we can take the ilocks and
	 * do the rest of the checks.
	 */
	lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);

	/* Verify that both files have the same format */
	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_unlock;
	}

	/*
	 * Lock and join the inodes to the transaction so that transaction commit
	 * or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
	lock_flags |= XFS_ILOCK_EXCL;
	xfs_trans_ijoin(tp, ip, lock_flags);
	xfs_trans_ijoin(tp, tip, lock_flags);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in.  If they differ, we abort this swap.
	 * This is the mechanism used to ensure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}
	/*
	 * Count the number of extended attribute blocks
	 */
	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
		if (error)
			goto out_trans_cancel;
	}
	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
						&taforkblks);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Before we've swapped the forks, let's set the owners of the forks
	 * appropriately. We have to do this as we are demand paging the btree
	 * buffers, and so the validation done on read will expect the owner
	 * field to be correctly set. Once we change the owners, we can swap the
	 * inode forks.
	 *
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		target_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
					      tip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		src_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
					      ip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	*tempifp = *ifp;	/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = *tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (__uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (__uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (__uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			ifp->if_u1.if_extents =
				ifp->if_u2.if_inline_ext;
		}
		src_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (src_log_flags & XFS_ILOG_DOWNER));
		src_log_flags |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			tifp->if_u1.if_extents =
				tifp->if_u2.if_inline_ext;
		}
		target_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		target_log_flags |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	xfs_trans_log_inode(tp, ip,  src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);
out:
	kmem_free(tempifp);
	return error;

out_unlock:
	xfs_iunlock(ip, lock_flags);
	xfs_iunlock(tip, lock_flags);
	goto out;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out;
}