mm, dax, gpu: convert vm_insert_mixed to pfn_t
author	Dan Williams <dan.j.williams@intel.com>
	Sat, 16 Jan 2016 00:56:40 +0000 (16:56 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Sat, 16 Jan 2016 01:56:32 +0000 (17:56 -0800)
Convert the raw unsigned long 'pfn' argument to pfn_t for the purpose of
evaluating the PFN_MAP and PFN_DEV flags.  When both are set, _PAGE_DEVMAP
is set in the resulting pte.
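
For context, pfn_t (from include/linux/pfn_t.h, introduced earlier in this
series) carries these flags in the otherwise-unused high bits of the value.
A sketch of the layout, from memory; the exact bit positions and comments
may differ in the merged header:

typedef struct {
	u64 val;
} pfn_t;

/* low bits hold the pfn itself, high bits hold the flags */
#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) /* not in the system memmap by default */
#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) /* mapping established by a device driver */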

There are no functional changes to the gpu drivers as a result of this
conversion.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: David Airlie <airlied@linux.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/include/asm/pgtable.h
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
fs/dax.c
include/linux/mm.h
include/linux/pfn_t.h
mm/memory.c

index 4973cc9eaccecdd957c38ec7fa138e79435bca5c..4c668f15a53227c43ed0d547ba8d6c0faa9c42cd 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -247,6 +247,11 @@ static inline pte_t pte_mkspecial(pte_t pte)
        return pte_set_flags(pte, _PAGE_SPECIAL);
 }
 
+static inline pte_t pte_mkdevmap(pte_t pte)
+{
+       return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
+}
+
 static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
 {
        pmdval_t v = native_pmd_val(pmd);
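
On x86, _PAGE_DEVMAP claims one of the software-defined pte bits in
arch/x86/include/asm/pgtable_types.h. As a rough sketch (the exact bit
assignment here is from memory):

#define _PAGE_BIT_SOFTW4	58	/* available for programmer */
#define _PAGE_BIT_DEVMAP	_PAGE_BIT_SOFTW4
#define _PAGE_DEVMAP		(_AT(u64, 1) << _PAGE_BIT_DEVMAP)

Setting _PAGE_SPECIAL alongside _PAGE_DEVMAP keeps vm_normal_page()
returning NULL for these ptes, so they remain exempt from normal page
refcounting.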
index 252eb301470ce576df2daf0cd01b7942172da51b..32358c5e3db4be25e7127225fa86e343b97757c7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -14,6 +14,7 @@
 
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
+#include <linux/pfn_t.h>
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
@@ -490,7 +491,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        pfn = page_to_pfn(exynos_gem->pages[page_offset]);
-       ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+       ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
+                       __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out:
        switch (ret) {
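
Each gpu driver conversion follows this same pattern: wrap the raw pfn in
__pfn_to_pfn_t(pfn, PFN_DEV) at the call site. The helper, sketched here
from include/linux/pfn_t.h, simply ors the flags into the high bits:

static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
{
	pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };

	return pfn_t;
}

Because the drivers pass PFN_DEV without PFN_MAP, pfn_t_devmap() evaluates
false for them and insert_pfn() keeps emitting plain special ptes, which is
why this is not a functional change for the gpu drivers.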
index 2eaf1b31c7bd8c76d8d69d60a0dd43343466e9ec..72bc979fa0dcec41fca7a2c2c5d8f7a9a20a5ea3 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/pfn_t.h>
 #include <linux/mm.h>
 #include <linux/tty.h>
 #include <linux/slab.h>
@@ -132,7 +133,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        for (i = 0; i < page_num; i++) {
                pfn = (phys_addr >> PAGE_SHIFT);
 
-               ret = vm_insert_mixed(vma, address, pfn);
+               ret = vm_insert_mixed(vma, address,
+                               __pfn_to_pfn_t(pfn, PFN_DEV));
                if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
                        break;
                else if (unlikely(ret != 0)) {
index c76cc853b08a57effec626b8c6f537b270ca61ac..3cedb8d5c855ac26c3e775db521ab48ddf42fb39 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -18,6 +18,7 @@
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
+#include <linux/pfn_t.h>
 
 #include "msm_drv.h"
 #include "msm_gem.h"
@@ -222,7 +223,8 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
                        pfn, pfn << PAGE_SHIFT);
 
-       ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+       ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
+                       __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
index 7ed08fdc4c4285eff29f109ce67de4b79e970de2..ceba5459ceb75b33bded69b6b56ffb9c5919a03c 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -19,6 +19,7 @@
 
 #include <linux/shmem_fs.h>
 #include <linux/spinlock.h>
+#include <linux/pfn_t.h>
 
 #include <drm/drm_vma_manager.h>
 
@@ -385,7 +386,8 @@ static int fault_1d(struct drm_gem_object *obj,
        VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
                        pfn, pfn << PAGE_SHIFT);
 
-       return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+       return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
+                       __pfn_to_pfn_t(pfn, PFN_DEV));
 }
 
 /* Special handling for the case of faulting in 2d tiled buffers */
@@ -478,7 +480,8 @@ static int fault_2d(struct drm_gem_object *obj,
                        pfn, pfn << PAGE_SHIFT);
 
        for (i = n; i > 0; i--) {
-               vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
+               vm_insert_mixed(vma, (unsigned long)vaddr,
+                               __pfn_to_pfn_t(pfn, PFN_DEV));
                pfn += usergart[fmt].stride_pfn;
                vaddr += PAGE_SIZE * m;
        }
index 8fb7213277cc9029d101f2006e7bc50289ab51df..06d26dc438b264dd76e7cbba28fd6b3d7c92fea3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -35,6 +35,7 @@
 #include <ttm/ttm_placement.h>
 #include <drm/drm_vma_manager.h>
 #include <linux/mm.h>
+#include <linux/pfn_t.h>
 #include <linux/rbtree.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -229,7 +230,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                }
 
                if (vma->vm_flags & VM_MIXEDMAP)
-                       ret = vm_insert_mixed(&cvma, address, pfn);
+                       ret = vm_insert_mixed(&cvma, address,
+                                       __pfn_to_pfn_t(pfn, PFN_DEV));
                else
                        ret = vm_insert_pfn(&cvma, address, pfn);
 
index 6b13d6cd9a9a3b649b04a243045318e8f01e30cd..574763eed8a361444f555c3f2745a9f7eaf3181f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -363,7 +363,7 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
        }
        dax_unmap_atomic(bdev, &dax);
 
-       error = vm_insert_mixed(vma, vaddr, pfn_t_to_pfn(dax.pfn));
+       error = vm_insert_mixed(vma, vaddr, dax.pfn);
 
  out:
        i_mmap_unlock_read(mapping);
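
Unlike the gpu drivers, dax no longer has to unwrap the pfn: dax.pfn
arrives as a pfn_t from the block driver's direct_access() implementation.
A pmem-style driver is expected to construct it with both flags set; a
hypothetical fragment of such a ->direct_access() method (pmem_base_phys
and offset are invented names):

	/* hand back a pfn_t flagged as device memory with a page mapping */
	*pfn = phys_to_pfn_t(pmem_base_phys + offset, PFN_DEV | PFN_MAP);

With PFN_DEV | PFN_MAP both set, pfn_t_devmap() is true and insert_pfn()
produces a devmap pte for dax mappings.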
index 8bb0907a360352db2adac2087052c534263ff1f1..a9902152449f0e8a30b72b037568269eae98ee6e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2107,7 +2107,7 @@ int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn);
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
-                       unsigned long pfn);
+                       pfn_t pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
 
 
index c557a0e0b20cb51eb5585e11383b08cb2e044821..bdaa275d7623e7b55b5d0b09dff43ff8e593c8c8 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -64,4 +64,31 @@ static inline pfn_t page_to_pfn_t(struct page *page)
 {
        return pfn_to_pfn_t(page_to_pfn(page));
 }
+
+static inline int pfn_t_valid(pfn_t pfn)
+{
+       return pfn_valid(pfn_t_to_pfn(pfn));
+}
+
+#ifdef CONFIG_MMU
+static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot)
+{
+       return pfn_pte(pfn_t_to_pfn(pfn), pgprot);
+}
+#endif
+
+#ifdef __HAVE_ARCH_PTE_DEVMAP
+static inline bool pfn_t_devmap(pfn_t pfn)
+{
+       const unsigned long flags = PFN_DEV|PFN_MAP;
+
+       return (pfn.val & flags) == flags;
+}
+#else
+static inline bool pfn_t_devmap(pfn_t pfn)
+{
+       return false;
+}
+pte_t pte_mkdevmap(pte_t pte);
+#endif
 #endif /* _LINUX_PFN_T_H_ */
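
The helpers above lean on pfn_t_to_pfn() to strip the flag bits back off;
as a sketch of that existing helper:

static inline unsigned long pfn_t_to_pfn(pfn_t pfn)
{
	return pfn.val & ~PFN_FLAGS_MASK;
}

Note that the !__HAVE_ARCH_PTE_DEVMAP branch declares pte_mkdevmap()
without defining it: pfn_t_devmap() is compile-time false there, so the
call in insert_pfn() is dead code the compiler can drop, and any other use
on an unsupported architecture fails at link time.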
index 5a73c6ed8e5cedfb5730c808eb69c3ea8fbeb414..7f03652723ea91044db5b3a27f82335c24dc38a9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -50,6 +50,7 @@
 #include <linux/export.h>
 #include <linux/delayacct.h>
 #include <linux/init.h>
+#include <linux/pfn_t.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
@@ -1500,7 +1501,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 EXPORT_SYMBOL(vm_insert_page);
 
 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-                       unsigned long pfn, pgprot_t prot)
+                       pfn_t pfn, pgprot_t prot)
 {
        struct mm_struct *mm = vma->vm_mm;
        int retval;
@@ -1516,7 +1517,10 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                goto out_unlock;
 
        /* Ok, finally just insert the thing.. */
-       entry = pte_mkspecial(pfn_pte(pfn, prot));
+       if (pfn_t_devmap(pfn))
+               entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
+       else
+               entry = pte_mkspecial(pfn_t_pte(pfn, prot));
        set_pte_at(mm, addr, pte, entry);
        update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
@@ -1566,14 +1570,14 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        if (track_pfn_insert(vma, &pgprot, pfn))
                return -EINVAL;
 
-       ret = insert_pfn(vma, addr, pfn, pgprot);
+       ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot);
 
        return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
-                       unsigned long pfn)
+                       pfn_t pfn)
 {
        BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
 
@@ -1587,10 +1591,10 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
         * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
         * without pte special, it would there be refcounted as a normal page.
         */
-       if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
+       if (!HAVE_PTE_SPECIAL && pfn_t_valid(pfn)) {
                struct page *page;
 
-               page = pfn_to_page(pfn);
+               page = pfn_t_to_page(pfn);
                return insert_page(vma, addr, page, vma->vm_page_prot);
        }
        return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
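
To summarize the new contract, a minimal, hypothetical fault handler after
this patch would look like the following (example_dev_base_pfn is an
invented driver-specific base pfn; the errno-to-VM_FAULT mapping the real
drivers perform is omitted):

static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* a device pfn with no struct page backing it */
	unsigned long pfn = example_dev_base_pfn + vmf->pgoff;

	/* PFN_DEV alone behaves exactly like the old unsigned long API */
	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

A pfn flagged PFN_DEV | PFN_MAP, as dax now passes through, additionally
gets _PAGE_DEVMAP in its pte via pte_mkdevmap().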