/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#define RADIX_DAX_MASK	0xf
#define RADIX_DAX_SHIFT	4
#define RADIX_DAX_PTE  (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_PMD  (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK)
#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE)))
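
/*
 * For example, a PTE-sized entry for sector 2048 is encoded as
 * (2048 << RADIX_DAX_SHIFT) | RADIX_DAX_PTE: the sector lives in the high
 * bits and the low nibble carries the type, so RADIX_DAX_TYPE() and
 * RADIX_DAX_SECTOR() simply undo that packing.  RADIX_TREE_EXCEPTIONAL_ENTRY
 * keeps the radix tree from mistaking these values for page pointers.
 */
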
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}

/*
 * dax_clear_sectors() is called from within transaction context from XFS,
 * and hence this means the stack from this point must follow GFP_NOFS
 * semantics for all operations.
 */
int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
{
	struct blk_dax_ctl dax = {
		.sector = _sector,
		.size = _size,
	};

	might_sleep();
	do {
		long count, sz;

		count = dax_map_atomic(bdev, &dax);
		if (count < 0)
			return count;
		sz = min_t(long, count, SZ_128K);
		clear_pmem(dax.addr, sz);
		dax.size -= sz;
		dax.sector += sz / 512;
		dax_unmap_atomic(bdev, &dax);
		cond_resched();
	} while (dax.size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_sectors);
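
/*
 * A minimal usage sketch: a filesystem that has just allocated blocks for a
 * DAX file can zero them before exposing them.  The wrapper below is
 * illustrative only (example_zero_new_extent is not a real caller); note
 * that the sector is in 512-byte units while the size is in bytes.
 */
static int __maybe_unused example_zero_new_extent(struct inode *inode,
		sector_t sector, long bytes)
{
	return dax_clear_sectors(inode->i_sb->s_bdev, sector, bytes);
}
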
static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};
	unsigned blkbits = inode->i_blkbits;
	sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
								>> blkbits;

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
				/*
				 * We allow uninitialized buffers for writes
				 * beyond EOF as those cannot race with faults
				 */
				WARN_ON_ONCE(
					(buffer_new(bh) && block < file_blks) ||
					(rw == WRITE && buffer_unwritten(bh)));
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				dax.addr += first;
				size = map_len - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: DIO_LOCKING and/or DIO_SKIP_DIO_COUNT, as for do_blockdev_direct_IO()
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_lock(inode);

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_unlock(inode);

	if (end_io) {
		int err;

		err = end_io(iocb, pos, retval, bh.b_private);
		if (err)
			retval = err;
	}

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
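
/*
 * A minimal sketch of how a filesystem's ->direct_IO path might use this
 * helper (the wrapper and its extra get_block argument are illustrative
 * only; a real implementation passes its own block-mapping callback):
 */
static ssize_t __maybe_unused example_dax_direct_IO(struct kiocb *iocb,
		struct iov_iter *iter, loff_t pos, get_block_t get_block)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* DIO_LOCKING: dax_do_io() takes the i_mutex itself for reads */
	return dax_do_io(iocb, inode, iter, pos, get_block, NULL, DIO_LOCKING);
}
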
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct inode *inode,
		struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

#define NO_SECTOR -1
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
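
/*
 * PMD_MASK >> PAGE_SHIFT clears the low bits of a page index, so
 * DAX_PMD_INDEX() rounds an index down to the first page covered by the
 * enclosing PMD entry: with 4K pages and 2M PMDs, DAX_PMD_INDEX(517) == 512.
 */
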
static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
		sector_t sector, bool pmd_entry, bool dirty)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	pgoff_t pmd_index = DAX_PMD_INDEX(index);
	int type, error = 0;
	void *entry;

	WARN_ON_ONCE(pmd_entry && !dirty);
	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	spin_lock_irq(&mapping->tree_lock);

	entry = radix_tree_lookup(page_tree, pmd_index);
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
		index = pmd_index;
		goto dirty;
	}

	entry = radix_tree_lookup(page_tree, index);
	if (entry) {
		type = RADIX_DAX_TYPE(entry);
		if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
					type != RADIX_DAX_PMD)) {
			error = -EIO;
			goto unlock;
		}

		if (!pmd_entry || type == RADIX_DAX_PMD)
			goto dirty;

		/*
		 * We only insert dirty PMD entries into the radix tree.  This
		 * means we don't need to worry about removing a dirty PTE
		 * entry and inserting a clean PMD entry, thus reducing the
		 * range we would flush with a follow-up fsync/msync call.
		 */
		radix_tree_delete(&mapping->page_tree, index);
		mapping->nrexceptional--;
	}

	if (sector == NO_SECTOR) {
		/*
		 * This can happen during correct operation if our pfn_mkwrite
		 * fault raced against a hole punch operation.  If this
		 * happens the pte that was hole punched will have been
		 * unmapped and the radix tree entry will have been removed by
		 * the time we are called, but the call will still happen.  We
		 * will return all the way up to wp_pfn_shared(), where the
		 * pte_same() check will fail, eventually causing page fault
		 * to be retried by the CPU.
		 */
		goto unlock;
	}

	error = radix_tree_insert(page_tree, index,
			RADIX_DAX_ENTRY(sector, pmd_entry));
	if (error)
		goto unlock;

	mapping->nrexceptional++;
 dirty:
	if (dirty)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return error;
}

static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int type = RADIX_DAX_TYPE(entry);
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
		ret = -EIO;
		goto unlock;
	}

	dax.sector = RADIX_DAX_SECTOR(entry);
	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index, pmd_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;
	void *entry;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;
	pmd_index = DAX_PMD_INDEX(start_index);

	rcu_read_lock();
	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
	rcu_read_unlock();

	/* see if the start of our range is covered by a PMD entry */
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		start_index = pmd_index;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
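
/*
 * A minimal sketch of the expected caller: a filesystem hands DAX mappings
 * to this helper from its writeback path (reached via fsync/msync) instead
 * of writing pages.  The wrapper below is illustrative only:
 */
static int __maybe_unused example_dax_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	if (!dax_mapping(mapping))
		return -EINVAL;	/* not a DAX mapping; use the normal path */
	return dax_writeback_mapping_range(mapping,
			mapping->host->i_sb->s_bdev, wbc);
}
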
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct address_space *mapping = inode->i_mapping;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	int error;

	i_mmap_lock_read(mapping);

	if (dax_map_atomic(bdev, &dax) < 0) {
		error = PTR_ERR(dax.addr);
		goto out;
	}
	dax_unmap_atomic(bdev, &dax);

	error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
			vmf->flags & FAULT_FLAG_WRITE);
	if (error)
		goto out;

	error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			put_page(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page)
			i_mmap_lock_read(mapping);
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		put_page(page);
		page = NULL;
	}

	/* Filesystem should not return unwritten buffers to us! */
	WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	error = dax_insert_mapping(inode, &bh, vma, vmf);

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
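
/*
 * A minimal sketch of how a filesystem wires the DAX fault helpers into its
 * vm_operations_struct (all "example_" names are illustrative; the stub
 * get_block stands in for the filesystem's real block-mapping callback, and
 * a huge-page capable filesystem would also set .pmd_fault):
 */
static int __maybe_unused example_get_block(struct inode *inode,
		sector_t iblock, struct buffer_head *bh_result, int create)
{
	return -EIO;	/* a real filesystem maps iblock to a device block */
}

static int __maybe_unused example_dax_fault(struct vm_area_struct *vma,
		struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, example_get_block);
}

static int __maybe_unused example_dax_pfn_mkwrite(struct vm_area_struct *vma,
		struct vm_fault *vmf)
{
	return dax_pfn_mkwrite(vma, vmf);
}

static const struct vm_operations_struct example_dax_vm_ops __maybe_unused = {
	.fault		= example_dax_fault,
	.page_mkwrite	= example_dax_fault,
	.pfn_mkwrite	= example_dax_pfn_mkwrite,
};
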
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
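
/* With 4K pages and 2M PMDs, for instance, PG_PMD_COLOUR is 511. */
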
static void __dax_dbg(struct buffer_head *bh, unsigned long address,
		const char *reason, const char *fn)
{
	if (bh) {
		char bname[BDEVNAME_SIZE];
		bdevname(bh->b_bdev, bname);
		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
			"length %zd fallback: %s\n", fn, current->comm,
			address, bname, bh->b_state, (u64)bh->b_blocknr,
			bh->b_size, reason);
	} else {
		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
			current->comm, address, reason);
	}
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int error, result = 0;
	bool alloc = false;

	/* dax pmd mappings require pfn_t_devmap() */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		dax_pmd_dbg(NULL, address, "cow write");
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start) {
		dax_pmd_dbg(NULL, address, "vma start unaligned");
		return VM_FAULT_FALLBACK;
	}
	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
		dax_pmd_dbg(NULL, address, "vma end unaligned");
		return VM_FAULT_FALLBACK;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(NULL, address,
				"offset + huge page size > file size");
		return VM_FAULT_FALLBACK;
	}

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;

	if (get_block(inode, block, &bh, 0) != 0)
		return VM_FAULT_SIGBUS;

	if (!buffer_mapped(&bh) && write) {
		if (get_block(inode, block, &bh, 1) != 0)
			return VM_FAULT_SIGBUS;
		alloc = true;
		WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	}

	bdev = bh.b_bdev;

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
		dax_pmd_dbg(&bh, address, "allocated block too small");
		return VM_FAULT_FALLBACK;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (alloc) {
		loff_t lstart = pgoff << PAGE_SHIFT;
		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

		truncate_pagecache_range(inode, lstart, lend);
	}

	i_mmap_lock_read(mapping);

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
			goto fallback;
		}

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			dax_pmd_dbg(&bh, address, "pmd already present");
			goto fallback;
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
				__func__, current->comm, address,
				(unsigned long long) to_sector(&bh, inode));

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			dax_pmd_dbg(&bh, address, "dax-error fallback");
			goto fallback;
		}
		if (length < PMD_SIZE) {
			dax_pmd_dbg(&bh, address, "dax-length too small");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}
		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
			dax_pmd_dbg(&bh, address, "pfn unaligned");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (!pfn_t_devmap(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			dax_pmd_dbg(&bh, address, "pfn not in memmap");
			goto fallback;
		}
		dax_unmap_atomic(bdev, &dax);

		/*
		 * For PTE faults we insert a radix tree entry for reads, and
		 * leave it clean.  Then on the first write we dirty the radix
		 * tree entry via the dax_pfn_mkwrite() path.  This sequence
		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
		 * call into get_block() to translate the pgoff to a sector in
		 * order to be able to create a new radix tree entry.
		 *
		 * The PMD path doesn't have an equivalent to
		 * dax_pfn_mkwrite(), though, so for a read followed by a
		 * write we traverse all the way through __dax_pmd_fault()
		 * twice.  This means we can just skip inserting a radix tree
		 * entry completely on the initial read and just wait until
		 * the write to insert a dirty entry.
		 */
		if (write) {
			error = dax_radix_entry(mapping, pgoff, dax.sector,
					true, true);
			if (error) {
				dax_pmd_dbg(&bh, address,
						"PMD radix insertion failed");
				goto fallback;
			}
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
				__func__, current->comm, address,
				pfn_t_to_pfn(dax.pfn),
				(unsigned long long) dax.sector);
		result |= vmf_insert_pfn_pmd(vma, address, pmd,
				dax.pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting address
 * @pmd: The PMD to install a mapping in
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	int error;

	/*
	 * We pass NO_SECTOR to dax_radix_entry() because we expect that a
	 * RADIX_DAX_PTE entry already exists in the radix tree from a
	 * previous call to __dax_fault().  We just want to look up that PTE
	 * entry using vmf->pgoff and make sure the dirty tag is set.  This
	 * saves us from having to make a call to get_block() here to look
	 * up the sector.
	 */
	error = dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false,
			true);

	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	if (error)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to the filesystem block size if
 * the filesystem took care of disposing of the unnecessary blocks.  Even if
 * the filesystem block size is smaller than PAGE_SIZE, we have to zero the
 * rest of the page since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		struct block_device *bdev = bh.b_bdev;
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PAGE_SIZE,
		};

		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to the filesystem block size if
 * the filesystem took care of disposing of the unnecessary blocks.  Even if
 * the filesystem block size is smaller than PAGE_SIZE, we have to zero the
 * rest of the page since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
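
/*
 * A minimal sketch of the intended caller: a filesystem's truncate path
 * zeroes the partial tail page before shrinking i_size.  The wrapper below
 * is illustrative only (example_dax_setsize is not a real caller):
 */
static int __maybe_unused example_dax_setsize(struct inode *inode,
		loff_t newsize, get_block_t get_block)
{
	int error = dax_truncate_page(inode, newsize, get_block);

	if (error)
		return error;
	truncate_setsize(inode, newsize);
	return 0;
}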