x86/tlb: fall back to flushing all when meeting a THP large page
authorAlex Shi <alex.shi@intel.com>
Thu, 28 Jun 2012 01:02:18 +0000 (09:02 +0800)
committerH. Peter Anvin <hpa@zytor.com>
Thu, 28 Jun 2012 02:29:09 +0000 (19:29 -0700)
We don't need to flush a large page in PAGE_SIZE steps; that only wastes
time, and according to our micro benchmark large pages do not benefit from
the 'invlpg' optimization anyway. So just flushing the whole TLB is enough
for them.
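
For illustration only, below is a minimal user-space model of the decision
this patch introduces. range_has_huge_page(), flush_whole_tlb() and
flush_one_page() are stand-ins for the kernel's has_large_page(),
flush_tlb_mm() and __flush_tlb_single(); this is not kernel code.

/* Illustrative user-space model; function names are stand-ins, not APIs. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* 2MiB THP page on x86-64 */

static bool range_has_huge_page(unsigned long start, unsigned long end)
{
	/* stand-in: assume a huge page sits at the first 2MiB boundary */
	unsigned long addr = (start + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1);
	return addr < end;
}

static void flush_whole_tlb(void)           { puts("flush whole TLB"); }
static void flush_one_page(unsigned long a) { printf("invlpg %#lx\n", a); }

static void flush_range(unsigned long start, unsigned long end)
{
	if (range_has_huge_page(start, end)) {
		/* per-PAGE_SIZE invlpg over a huge page only wastes time */
		flush_whole_tlb();
		return;
	}
	for (unsigned long addr = start; addr < end; addr += PAGE_SIZE)
		flush_one_page(addr);
}

int main(void)
{
	flush_range(0x400000, 0x800000);	/* covers a 2MiB page: one full flush */
	flush_range(0x1000, 0x5000);		/* small range: per-page invlpg */
	return 0;
}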

The following results were measured on a 2-socket * 4-core * 2-HT NHM EP
machine, with THP set to 'always'.

Multi-threaded testing; the '-t' parameter is the thread count (a
hypothetical sketch of such a benchmark follows the table):
                       without this patch  with this patch
./mprotect -t 1         14ns                       13ns
./mprotect -t 2         13ns                       13ns
./mprotect -t 4         12ns                       11ns
./mprotect -t 8         14ns                       10ns
./mprotect -t 16        28ns                       28ns
./mprotect -t 32        54ns                       52ns
./mprotect -t 128       200ns                      200ns
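
The source of the ./mprotect tool is not part of this commit; the following
is only a hypothetical sketch of what such a micro benchmark could look
like, repeatedly toggling protections on a mapping large enough to be
THP-backed (thread count is taken from argv[1] here instead of a '-t'
option; build with gcc -O2 -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <time.h>

#define ITERS	100000
#define LEN	(4UL * 1024 * 1024)	/* big enough to be THP-backed */

static void *worker(void *arg)
{
	char *buf = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct timespec t0, t1;

	(void)arg;
	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (int i = 0; i < ITERS; i++) {
		mprotect(buf, LEN, PROT_READ);		/* triggers flush_tlb_range() */
		mprotect(buf, LEN, PROT_READ | PROT_WRITE);
	}
	clock_gettime(CLOCK_MONOTONIC, &t1);

	long ns = (t1.tv_sec - t0.tv_sec) * 1000000000L +
		  (t1.tv_nsec - t0.tv_nsec);
	printf("%ld ns per mprotect() call\n", ns / (2L * ITERS));
	munmap(buf, LEN);
	return NULL;
}

int main(int argc, char **argv)
{
	int nthreads = argc > 1 ? atoi(argv[1]) : 1;	/* stands in for '-t' */
	if (nthreads < 1)
		nthreads = 1;
	pthread_t tid[nthreads];

	for (int i = 0; i < nthreads; i++)
		pthread_create(&tid[i], NULL, worker, NULL);
	for (int i = 0; i < nthreads; i++)
		pthread_join(tid[i], NULL);
	return 0;
}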

Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-4-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
arch/x86/mm/tlb.c

index 3b91c981a27fbcd275230b4f1329d33f10e47473..184a02a4d871b1fc7381daed9eb3f899a1135aa8 100644
@@ -318,12 +318,42 @@ void flush_tlb_mm(struct mm_struct *mm)
 
 #define FLUSHALL_BAR   16
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline unsigned long has_large_page(struct mm_struct *mm,
+                                unsigned long start, unsigned long end)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       unsigned long addr = ALIGN(start, HPAGE_SIZE);
+       for (; addr < end; addr += HPAGE_SIZE) {
+               pgd = pgd_offset(mm, addr);
+               if (likely(!pgd_none(*pgd))) {
+                       pud = pud_offset(pgd, addr);
+                       if (likely(!pud_none(*pud))) {
+                               pmd = pmd_offset(pud, addr);
+                               if (likely(!pmd_none(*pmd)))
+                                       if (pmd_large(*pmd))
+                                               return addr;
+                       }
+               }
+       }
+       return 0;
+}
+#else
+static inline unsigned long has_large_page(struct mm_struct *mm,
+                                unsigned long start, unsigned long end)
+{
+       return 0;
+}
+#endif
 void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
 {
        struct mm_struct *mm;
 
        if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
+flush_all:
                flush_tlb_mm(vma->vm_mm);
                return;
        }
@@ -346,6 +376,10 @@ void flush_tlb_range(struct vm_area_struct *vma,
                        if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
                                local_flush_tlb();
                        else {
+                               if (has_large_page(mm, start, end)) {
+                                       preempt_enable();
+                                       goto flush_all;
+                               }
                                for (addr = start; addr < end;
                                                addr += PAGE_SIZE)
                                        __flush_tlb_single(addr);
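
As a worked example of the existing heuristic this hunk extends (assuming,
for instance, that act_entries resolves to a 512-entry last-level TLB):
ranges larger than 512 / FLUSHALL_BAR = 512 / 16 = 32 pages (128 KiB)
already fall back to local_flush_tlb(). The new has_large_page() check
additionally punts to flush_tlb_mm() whenever a 2MiB THP mapping lies
inside a range below that threshold, rather than issuing per-4KiB invlpg
instructions against a huge page.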