 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/writeback.h>
STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
		struct writeback_control *wbc, void *, int, int);
#if defined(XFS_RW_TRACE)
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),

		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif
/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.
 */
	if (atomic_dec_and_test(&ioend->io_remaining))
		queue_work(xfsdatad_workqueue, &ioend->io_work);
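
	/*
	 * Note on the handoff above: unwritten extent conversion needs a
	 * transaction, which cannot be issued from the irq-context bio
	 * completion path, so the final reference drop queues
	 * xfs_end_bio_unwritten() to run on xfsdatad in process context.
	 */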
	vn_iowake(ioend->io_vnode);
	mempool_free(ioend, xfs_ioend_pool);
/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
xfs_end_bio_unwritten(
	void			*data)
{
	xfs_ioend_t		*ioend = data;
	vnode_t			*vp = ioend->io_vnode;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	struct buffer_head	*bh, *next;

	if (ioend->io_uptodate)
		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);

	/* ioend->io_buffer_head is only non-NULL for buffered I/O */
	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;

		clear_buffer_unwritten(bh);
		end_buffer_async_write(bh, ioend->io_uptodate);
	}

	xfs_destroy_ioend(ioend);
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the on-disk inode size later
 */
	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent the I/O
	 * completion callback from being run before we have started
	 * all the I/O.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_uptodate = 1; /* cleared if any I/O fails */
	ioend->io_vnode = LINVFS_GET_VP(inode);
	ioend->io_buffer_head = NULL;
	atomic_inc(&ioend->io_vnode->v_iocount);
	ioend->io_offset = 0;

	INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
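
	/*
	 * Sketch of the intended lifecycle (mirroring what
	 * xfs_map_unwritten() and linvfs_direct_IO() below do; not a
	 * complete caller):
	 *
	 *	ioend = xfs_alloc_ioend(inode);	 // io_remaining == 1
	 *	atomic_add(nblocks, &ioend->io_remaining);
	 *	// submit the buffers; each completion drops one hold
	 *	// via linvfs_unwritten_done() -> xfs_finish_ioend()
	 *	ioend->io_offset = offset;
	 *	ioend->io_size = size;
	 *	xfs_finish_ioend(ioend);	 // drop the initial hold
	 */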
linvfs_unwritten_done(
	struct buffer_head	*bh,
	int			uptodate)
{
	xfs_ioend_t		*ioend = bh->b_private;
	static spinlock_t	unwritten_done_lock = SPIN_LOCK_UNLOCKED;
	unsigned long		flags;

	ASSERT(buffer_unwritten(bh));

	if (!uptodate)
		ioend->io_uptodate = 0;

	/*
	 * Deep magic here.  We reuse b_private in the buffer_heads to build
	 * a chain for completing the I/O from user context after we've issued
	 * a transaction to convert the unwritten extent.
	 */
	spin_lock_irqsave(&unwritten_done_lock, flags);
	bh->b_private = ioend->io_buffer_head;
	ioend->io_buffer_head = bh;
	spin_unlock_irqrestore(&unwritten_done_lock, flags);
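
	/*
	 * The list built above is LIFO: each completed buffer is pushed
	 * onto the front of ioend->io_buffer_head through its b_private
	 * pointer, and xfs_end_bio_unwritten() later walks that chain to
	 * run end_buffer_async_write() on every buffer.
	 */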
	xfs_finish_ioend(ioend);
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, nmaps = 1;

	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
/*
 * Find the block mapping in the @map array that corresponds to the
 * given @offset within a @page.
 */
	unsigned long		offset)
{
	loff_t			full_offset;	/* offset from start of file */

	ASSERT(offset < PAGE_CACHE_SIZE);

	full_offset = page->index;		/* NB: using 64bit number */
	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
	full_offset += offset;			/* offset from page start */
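
	/*
	 * Worked example (assuming 4k pages, PAGE_CACHE_SHIFT == 12):
	 * for page->index == 3 and offset == 512 within the page,
	 * full_offset == (3 << 12) + 512 == 12800 bytes into the file.
	 */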
	if (full_offset < iomapp->iomap_offset)

	if (iomapp->iomap_offset + (iomapp->iomap_bsize - 1) >= full_offset)
	struct buffer_head	*bh,
	unsigned long		offset,

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	delta <<= PAGE_CACHE_SHIFT;

	delta -= iomapp->iomap_offset;
	delta >>= block_bits;

	sector_shift = block_bits - BBSHIFT;
	bn = iomapp->iomap_bn >> sector_shift;
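
	/*
	 * Example of the unit conversion (assuming 4k filesystem blocks,
	 * block_bits == 12, and 512-byte basic blocks, BBSHIFT == 9):
	 * sector_shift == 3, so iomap_bn, which counts 512-byte units,
	 * is divided by 8 to yield the filesystem block number bn.
	 */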
	BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	bh->b_bdev = iomapp->iomap_target->pbr_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
/*
 * Look for a page at index which is unlocked and contains our
 * unwritten extent flagged buffers at its head.  Returns page
 * locked and with an extra reference count, and length of the
 * unwritten extent component on this page that we can write,
 * in units of filesystem blocks.
 */
xfs_probe_unwritten_page(
	struct address_space	*mapping,
	unsigned long		max_offset,

	page = find_trylock_page(mapping, index);

	if (PageWriteback(page))

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		unsigned long		p_offset = 0;

		bh = head = page_buffers(page);
		do {
			if (!buffer_unwritten(bh) || !buffer_uptodate(bh))

			if (!xfs_offset_to_map(page, iomapp, p_offset))

			if (p_offset >= max_offset)

			xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
			set_buffer_unwritten_io(bh);
			bh->b_private = ioend;
			p_offset += bh->b_size;

		} while ((bh = bh->b_this_page) != head);
/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for mmap write case.
 */
xfs_probe_unmapped_page(
	struct address_space	*mapping,
	unsigned int		pg_offset)

	page = find_trylock_page(mapping, index);

	if (PageWriteback(page))

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (buffer_mapped(bh) || !buffer_uptodate(bh))

				if (ret >= pg_offset)

			} while ((bh = bh->b_this_page) != head);

			ret = PAGE_CACHE_SIZE;
xfs_probe_unmapped_cluster(
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head)
{
	pgoff_t			tindex, tlast, tloff;
	unsigned int		pg_offset, len, total = 0;
	struct address_space	*mapping = inode->i_mapping;

	/* First sum forwards in this page */
	do {
		if (buffer_mapped(bh))

	} while ((bh = bh->b_this_page) != head);

	/* If we reached the end of the page, sum forwards in
	 * following pages.
	 */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);
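
	/*
	 * The "+ 64" above caps the probe at 64 pages past the starting
	 * page, so one cluster walk cannot degenerate into scanning
	 * every page out to EOF on a large sparse file.
	 */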
	for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
		len = xfs_probe_unmapped_page(mapping, tindex,

	if (tindex == tlast &&
	    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
		total += xfs_probe_unmapped_page(mapping,
/*
 * Probe for a given page (index) in the inode and test if it is delayed
 * and without unwritten buffers.  Returns page locked and with an extra
 * reference count.
 */
xfs_probe_delalloc_page(

	page = find_trylock_page(inode->i_mapping, index);

	if (PageWriteback(page))

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh)) {

			} else if (buffer_delay(bh)) {

		} while ((bh = bh->b_this_page) != head);
	struct page		*start_page,
	struct buffer_head	*head,
	struct buffer_head	*curr,
	unsigned long		p_offset,

	struct writeback_control *wbc,

	struct buffer_head	*bh = curr;

	unsigned long		nblocks = 0;

	offset = start_page->index;
	offset <<= PAGE_CACHE_SHIFT;

	ioend = xfs_alloc_ioend(inode);

	/* First map forwards in the page consecutive buffers
	 * covering this unwritten extent
	 */
	do {
		if (!buffer_unwritten(bh))

		tmp = xfs_offset_to_map(start_page, iomapp, p_offset);

		xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
		set_buffer_unwritten_io(bh);
		bh->b_private = ioend;
		p_offset += bh->b_size;

	} while ((bh = bh->b_this_page) != head);

	atomic_add(nblocks, &ioend->io_remaining);

	/* If we reached the end of the page, map forwards in any
	 * following pages which are also covered by this extent.
	 */
		struct address_space	*mapping = inode->i_mapping;
		pgoff_t			tindex, tloff, tlast;

		unsigned int		pg_offset, bbits = inode->i_blkbits;

		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
		tloff = min(tlast, tloff);
		for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
			page = xfs_probe_unwritten_page(mapping,
						tindex, iomapp, ioend,
						PAGE_CACHE_SIZE, &bs, bbits);

			atomic_add(bs, &ioend->io_remaining);
			xfs_convert_page(inode, page, iomapp, wbc, ioend,

			/* stop if converting the next page might add
			 * enough blocks that the corresponding byte
			 * count won't fit in our ulong page buf length */
			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
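
			/*
			 * Arithmetic behind this guard: io_size is later
			 * computed as nblocks << block_bits, so once
			 * nblocks reaches (ULONG_MAX - PAGE_SIZE) >>
			 * block_bits, adding another page's worth of
			 * blocks could overflow an unsigned long byte
			 * count; clustering stops first.
			 */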
		if (tindex == tlast &&
		    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
			page = xfs_probe_unwritten_page(mapping,
						tindex, iomapp, ioend,
						pg_offset, &bs, bbits);

			atomic_add(bs, &ioend->io_remaining);
			xfs_convert_page(inode, page, iomapp, wbc, ioend,

			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))

	ioend->io_size = (xfs_off_t)nblocks << block_bits;
	ioend->io_offset = offset;
	xfs_finish_ioend(ioend);
	struct writeback_control *wbc,
	struct buffer_head	*bh_arr[],

	struct buffer_head	*bh;

	BUG_ON(PageWriteback(page));

	set_page_writeback(page);

	clear_page_dirty(page);

	for (i = 0; i < bh_count; i++) {

		mark_buffer_async_write(bh);
		if (buffer_unwritten(bh))
			set_buffer_unwritten_io(bh);
		set_buffer_uptodate(bh);
		clear_buffer_dirty(bh);
	}

	for (i = 0; i < bh_count; i++)
		submit_bh(WRITE, bh_arr[i]);

	if (probed_page && clear_dirty)
		wbc->nr_to_write--;	/* Wrote an "extra" page */
/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
	struct writeback_control *wbc,

	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*mp = iomapp, *tmp;
	unsigned long		offset, end_offset;

	int			bbits = inode->i_blkbits;

	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 */
	len = 1 << inode->i_blkbits;
	end_offset = max(end_offset, PAGE_CACHE_SIZE);
	end_offset = roundup(end_offset, len);
	page_dirty = end_offset / len;
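
	/*
	 * Example of this accounting (assuming 4k pages and 512-byte
	 * blocks, so len == 512): a page fully inside EOF rounds
	 * end_offset up to PAGE_CACHE_SIZE, so page_dirty == 4096/512
	 * == 8, one count for each buffer that must reach a cleanable
	 * state before the page is considered clean.
	 */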
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)

		if (!(PageUptodate(page) || buffer_uptodate(bh)))

		if (buffer_mapped(bh) && all_bh &&
		    !(buffer_unwritten(bh) || buffer_delay(bh))) {

				bh_arr[index++] = bh;

		tmp = xfs_offset_to_map(page, mp, offset);

		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

		/* If this is a new unwritten extent buffer (i.e. one
		 * that we haven't passed in private data for), we must
		 * now map this buffer too.
		 */
		if (buffer_unwritten(bh) && !bh->b_end_io) {
			ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
			xfs_map_unwritten(inode, page, head, bh, offset,
					bbits, tmp, wbc, startio, all_bh);
		} else if (!(buffer_unwritten(bh) && buffer_locked(bh))) {
			xfs_map_at_offset(page, bh, offset, bbits, tmp);
			if (buffer_unwritten(bh)) {
				set_buffer_unwritten_io(bh);
				bh->b_private = private;

			bh_arr[index++] = bh;

			set_buffer_dirty(bh);

			mark_buffer_dirty(bh);

	} while (offset += len, (bh = bh->b_this_page) != head);

	if (startio && index) {
		xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
	struct writeback_control *wbc,

	for (; tindex <= tlast; tindex++) {
		page = xfs_probe_delalloc_page(inode, tindex);

		xfs_convert_page(inode, page, iomapp, wbc, NULL,
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state flags cannot know if any of the blocks (or which block,
 * for that matter) are dirty due to mmap writes, and therefore bh uptodate
 * is only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mmap writes but only partially set up by block_prepare_write, the
 * bh->b_state flags will not agree and only the ones set up by BPW/BCW
 * will have valid state; thus the whole page must be written out.
 */
xfs_page_state_convert(
	struct writeback_control *wbc,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*iomp, iomap;

	unsigned long		p_offset = 0;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	int			len, err, i, cnt = 0, uptodate = 1;

	/* wait for other IO threads? */
	flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK;
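
	/*
	 * BMAPI_TRYLOCK makes the mapping calls below non-blocking: in
	 * the releasepage case (startio clear) or for WB_SYNC_NONE
	 * writeback, allocation may fail with EAGAIN rather than sleep,
	 * and the error path at the bottom then leaves the page dirty.
	 */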
	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
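
		/*
		 * Worked example (assuming 4k pages): with i_size ==
		 * 10000, end_index == 2 and bytes 8192..9999 live on
		 * page 2; any page with index >= 3, or page 2 itself
		 * when i_size is exactly page aligned, lies wholly
		 * beyond EOF and must not be written.
		 */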
	end_offset = min_t(unsigned long long,
			(loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 */
	len = 1 << inode->i_blkbits;
	p_offset = max(p_offset, PAGE_CACHE_SIZE);
	p_offset = roundup(p_offset, len);
	page_dirty = p_offset / len;
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)

		if (!buffer_uptodate(bh))

		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)

		iomp = xfs_offset_to_map(page, &iomap, p_offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 */
		if (buffer_unwritten(bh)) {

			err = xfs_map_blocks(inode, offset, len, &iomap,
					BMAPI_WRITE|BMAPI_IGNSTATE);

			iomp = xfs_offset_to_map(page, &iomap,

			err = xfs_map_unwritten(inode, page,

					inode->i_blkbits, iomp,
					wbc, startio, unmapped);

				set_bit(BH_Lock, &bh->b_state);

			BUG_ON(!buffer_locked(bh));

		/*
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 */
		} else if (buffer_delay(bh)) {

			err = xfs_map_blocks(inode, offset, len, &iomap,
					BMAPI_ALLOCATE | flags);

			iomp = xfs_offset_to_map(page, &iomap,

			xfs_map_at_offset(page, bh, p_offset,
					inode->i_blkbits, iomp);

				set_buffer_dirty(bh);

				mark_buffer_dirty(bh);

		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {

			if (!buffer_mapped(bh)) {

				/*
				 * Getting here implies an unmapped buffer
				 * was found, and we are in a path where we
				 * need to write the whole page out.
				 */

				size = xfs_probe_unmapped_cluster(
						inode, page, bh, head);
				err = xfs_map_blocks(inode, offset,

						BMAPI_WRITE|BMAPI_MMAP);

				iomp = xfs_offset_to_map(page, &iomap,

				xfs_map_at_offset(page,

						inode->i_blkbits, iomp);

					set_buffer_dirty(bh);

					mark_buffer_dirty(bh);

			} else if (startio) {
				if (buffer_uptodate(bh) &&
				    !test_and_set_bit(BH_Lock, &bh->b_state)) {

	} while (offset += len, p_offset += len,
			((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

		xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);

		offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>

		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, iomp, wbc,
				startio, unmapped, tlast);

	for (i = 0; i < cnt; i++) {
		unlock_buffer(bh_arr[i]);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {

		block_invalidatepage(page, 0);

	ClearPageUptodate(page);
	unsigned long		blocks,
	struct buffer_head	*bh_result,

	vnode_t			*vp = LINVFS_GET_VP(inode);

	loff_t			offset = (loff_t)iblock << inode->i_blkbits;

	if (blocks)
		size = blocks << inode->i_blkbits;
	else
		size = 1 << inode->i_blkbits;

	VOP_BMAP(vp, offset, size,
		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {

		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			delta = offset - iomap.iomap_offset;
			delta >>= inode->i_blkbits;

			bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);

			BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
			bh_result->b_blocknr = bn;
			set_buffer_mapped(bh_result);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {

				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/* If this is a realtime file, data might be on a new device */
	bh_result->b_bdev = iomap.iomap_target->pbr_bdev;

	/*
	 * If we previously allocated a block out beyond eof and
	 * we are now coming back to use it then we will need to
	 * flag it as new even if it has a disk address.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW))) {
		set_buffer_new(bh_result);
	}

	if (iomap.iomap_flags & IOMAP_DELAY) {

		set_buffer_uptodate(bh_result);
		set_buffer_mapped(bh_result);
		set_buffer_delay(bh_result);
	}

	bh_result->b_size = (ssize_t)min(
		(loff_t)(iomap.iomap_bsize - iomap.iomap_delta),
		(loff_t)(blocks << inode->i_blkbits));
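
	/*
	 * Example of the min() above (assuming 4k blocks): if the
	 * mapping still covers 64k beyond iomap_delta but the caller
	 * asked for blocks == 8, b_size becomes min(65536, 8 << 12)
	 * == 32768, so we never report more than was requested.
	 */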
	struct inode		*inode,
	struct buffer_head	*bh_result,

	return __linvfs_get_block(inode, iblock, 0, bh_result,
					create, 0, BMAPI_WRITE);

linvfs_get_blocks_direct(
	struct inode		*inode,
	unsigned long		max_blocks,
	struct buffer_head	*bh_result,

	return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
					create, 1, BMAPI_WRITE|BMAPI_DIRECT);

linvfs_end_io_direct(

	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	if (private && size > 0) {
		ioend->io_offset = offset;
		ioend->io_size = size;
		xfs_finish_ioend(ioend);
	} else {
		xfs_destroy_ioend(ioend);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
	const struct iovec	*iov,
	unsigned long		nr_segs)

	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	vnode_t		*vp = LINVFS_GET_VP(inode);

	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);

	iocb->private = xfs_alloc_ioend(inode);

	ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			iomap.iomap_target->pbr_bdev,
			iov, offset, nr_segs,
			linvfs_get_blocks_direct,
			linvfs_end_io_direct);

	if (unlikely(ret <= 0 && iocb->private))
		xfs_destroy_ioend(iocb->private);
	struct address_space	*mapping,

	struct inode		*inode = (struct inode *)mapping->host;
	vnode_t			*vp = LINVFS_GET_VP(inode);

	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

	VOP_RWLOCK(vp, VRWLOCK_READ);
	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
	VOP_RWUNLOCK(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, linvfs_get_block);

	struct file		*unused,

	return mpage_readpage(page, linvfs_get_block);

	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,

	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
xfs_count_page_state(

	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))

		else if (buffer_unwritten(bh) && !buffer_delay(bh))
			clear_buffer_unwritten(bh);
		else if (buffer_unwritten(bh))

		else if (buffer_delay(bh))

	} while ((bh = bh->b_this_page) != head);
/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page.  Typically the page dirty
 *    state is cleared before we get here.  In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it.  For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate.  For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
	struct writeback_control *wbc)

	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {

		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))

		need_trans = delalloc + unmapped + unwritten;
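
		/*
		 * need_trans is used as a truth value: delalloc,
		 * unmapped and unwritten are per-state buffer counts
		 * from xfs_count_page_state(), so a page with, say, two
		 * delalloc and one unwritten buffer gives need_trans
		 * == 3, and any non-zero sum selects the transaction
		 * path.
		 */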
	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (PFLAGS_TEST_FSTRANS() && need_trans)

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)

	if (unlikely(error < 0))

	redirty_page_for_writepage(wbc, page);
linvfs_invalidate_page(
	unsigned long		offset)

	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	return block_invalidatepage(page, offset);
/*
 * Called to move a page into cleanable state - and from there
 * to be released.  Possibly the page is already clean.  We always
 * have buffer heads in this call.
 *
 * Returns 0 if the page is ok to release, 1 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O.  buffer heads will be dirty and possibly
 *    delalloc.  If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
linvfs_release_page(

	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)

	if (!(gfp_mask & __GFP_FS))

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (PFLAGS_TEST_FSTRANS())

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)

	return try_to_free_buffers(page);
linvfs_prepare_write(

	return block_prepare_write(page, from, to, linvfs_get_block);
struct address_space_operations linvfs_aops = {
	.readpage		= linvfs_readpage,
	.readpages		= linvfs_readpages,
	.writepage		= linvfs_writepage,
	.sync_page		= block_sync_page,
	.releasepage		= linvfs_release_page,
	.invalidatepage		= linvfs_invalidate_page,
	.prepare_write		= linvfs_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= linvfs_bmap,
	.direct_IO		= linvfs_direct_IO,
};