mm,thp: introduce flush_pmd_tlb_range
author Vineet Gupta <vgupta@synopsys.com>
Fri, 20 Feb 2015 05:06:28 +0000 (10:36 +0530)
committer Vineet Gupta <vgupta@synopsys.com>
Sat, 17 Oct 2015 12:18:20 +0000 (17:48 +0530)
ARCHes with special requirements for evicting THP backing TLB entries
can implement this.

Even otherwise, it can help optimize TLB flushing in the THP regime.
The stock flush_tlb_range() typically has an optimization to nuke the
entire TLB if the flush span is greater than a certain threshold, which
will likely be true for a single huge page. Thus a single THP flush
would invalidate the entire TLB, which is not desirable.

e.g. see arch/arc: flush_pmd_tlb_range
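
For illustration only (not part of this patch, and not the actual ARC
code): a typical arch flush_tlb_range() fallback looks roughly like the
sketch below, where TLB_FLUSH_THRESHOLD, local_flush_tlb_all() and
local_flush_tlb_page() are hypothetical stand-ins. Since HPAGE_PMD_SIZE
is usually well above any such threshold, a single THP flush degenerates
into a full TLB flush:

	/* hypothetical flush_tlb_range() on a typical arch */
	void flush_tlb_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
	{
		if (end - start > TLB_FLUSH_THRESHOLD) {	/* e.g. a few pages */
			local_flush_tlb_all();		/* nuke the whole TLB */
			return;
		}
		while (start < end) {
			local_flush_tlb_page(vma, start);
			start += PAGE_SIZE;
		}
	}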

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Link: http://lkml.kernel.org/r/20151009100816.GC7873@node
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
mm/huge_memory.c
mm/pgtable-generic.c

index 4b06b8db9df23c8f33406586507bbaecf7f5444c..e25eb3d2081a04f68a3c496ee4665c9e712e24f3 100644 (file)
@@ -1880,7 +1880,7 @@ static int __split_huge_page_map(struct page *page,
                 * here). But it is generally safer to never allow
                 * small and huge TLB entries for the same virtual
                 * address to be loaded simultaneously. So instead of
-                * doing "pmd_populate(); flush_tlb_range();" we first
+                * doing "pmd_populate(); flush_pmd_tlb_range();" we first
                 * mark the current pmd notpresent (atomically because
                 * here the pmd_trans_huge and pmd_trans_splitting
                 * must remain set at all times on the pmd until the
index c9c59bb75a17f4f0b28a8988a629ee54a51e2e1f..7d3db0247983b22b121290c2203ba2c2fb544ec0 100644 (file)
@@ -84,6 +84,20 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Even otherwise, it can help optimize the normal TLB flush
+ * in the THP regime. The stock flush_tlb_range() typically has an optimization
+ * to nuke the entire TLB if the flush span is greater than a threshold, which
+ * will likely be true for a single huge page. Thus a single THP flush will
+ * invalidate the entire TLB, which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end)    flush_tlb_range(vma, addr, end)
+#endif
+
 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
 int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
@@ -93,7 +107,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (changed) {
                set_pmd_at(vma->vm_mm, address, pmdp, entry);
-               flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+               flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
 }
@@ -107,7 +121,7 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
-               flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+               flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return young;
 }
 #endif
@@ -120,7 +134,7 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(!pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
-       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+       flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
 }
 #endif
@@ -133,7 +147,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        set_pmd_at(vma->vm_mm, address, pmdp, pmd);
        /* tlb flush only to serialize against gup-fast */
-       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+       flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
 #endif
 
@@ -179,7 +193,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 {
        pmd_t entry = *pmdp;
        set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
-       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+       flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
 #endif
 
@@ -196,7 +210,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
-       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+       flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
 }
 #endif
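
As a usage note (a sketch under assumptions, not part of this commit): an
architecture with special requirements opts out of the generic fallback by
defining __HAVE_ARCH_FLUSH_PMD_TLB_RANGE in its pgtable header and providing
its own flush_pmd_tlb_range(). Everything below other than those two names
is hypothetical:

	/* arch/xyz/include/asm/pgtable.h (hypothetical arch) */
	#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
	void flush_pmd_tlb_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end);

	/* arch/xyz/mm/tlb.c (hypothetical) */
	void flush_pmd_tlb_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
	{
		unsigned long addr;

		/* evict only the huge-page entries covering the range */
		for (addr = start; addr < end; addr += HPAGE_PMD_SIZE)
			local_flush_tlb_huge_page(vma, addr);	/* hypothetical helper */
	}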