/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

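/*
 * Decide whether the pmd page of @svma can be shared with the mapping of
 * @vma at @addr (file index @idx).  Sharing requires the same offset within
 * the pmd page, identical vm_flags, and that @svma fully covers the
 * PUD_SIZE-aligned region; returns the matching address inside @svma, or 0
 * if the page table page cannot be shared.
 */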
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/*
	 * Match the virtual addresses, permissions and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vma->vm_flags != svma->vm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}

static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return 1;
	return 0;
}

/*
 * Search for a pmd page that another mapping of the same file has already
 * set up for this address range.  If one is found, take a reference on it
 * and install it in this mm's pud, so that huge_pte_alloc() finds the pud
 * already populated and reuses the shared pmd page.
 */
static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct prio_tree_iter iter;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;

	if (!vma_shareable(vma, addr))
		return;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud))
		pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
	else
		put_page(virt_to_page(spte));
	spin_unlock(&mm->page_table_lock);
out:
	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Unmap a huge page backed by a shared pte page.
 *
 * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
 * shared, as indicated by page_count > 1, unmapping is achieved by clearing
 * the pud and decrementing the refcount.  If the count == 1, the pte page is
 * not shared.
 *
 * Called with vma->vm_mm->page_table_lock held.
 *
 * Returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
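/*
 * Illustrative caller sketch (an assumption about the generic hugetlb code,
 * not part of this file): the unmap loop in mm/hugetlb.c is expected to
 * advance in HPAGE_SIZE steps and skip the rest of a shared range when
 * huge_pmd_unshare() succeeds, roughly:
 *
 *	for (address = start; address < end; address += HPAGE_SIZE) {
 *		ptep = huge_pte_offset(mm, address);
 *		if (!ptep)
 *			continue;
 *		if (huge_pmd_unshare(mm, &address, ptep))
 *			continue;
 *		... clear and free the individual huge pte ...
 *	}
 */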
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
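	/*
	 * Skip the rest of the range covered by the shared pmd page: the
	 * caller advances *addr by HPAGE_SIZE per iteration, so back off
	 * by one huge page from the PUD-aligned end of the region.
	 */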
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}

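/*
 * Allocate the page table entries needed to map a huge page at @addr.  When
 * the pud is empty, first try to reuse a pmd page already set up by another
 * mapping of the same file (huge_pmd_share); otherwise pmd_alloc() installs
 * a fresh pmd page.  Returns the pmd entry for @addr cast to a pte_t *, or
 * NULL if allocation failed.
 */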
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (pud_none(*pud))
			huge_pmd_share(mm, addr, pud);
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}

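/*
 * Look up, without allocating, the pmd entry that maps (or would map) the
 * huge page at @addr.  Returns NULL if the pgd or pud is not present.
 */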
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud))
			pmd = pmd_offset(pud, addr);
	}
	return (pte_t *) pmd;
}

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* pick the base page of the huge page that covers address */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

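/*
 * follow_huge_addr() is not used for lookups on x86: returning -EINVAL makes
 * the generic follow_page() path fall back to the ordinary page table walk,
 * where pmd_huge() and follow_huge_pmd() below handle PSE mappings.
 */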
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
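/*
 * Bottom-up search: walk the VMA list upward from free_area_cache (or
 * TASK_UNMAPPED_BASE) looking for a HPAGE_SIZE-aligned hole of @len bytes,
 * keeping mm->cached_hole_size up to date so future searches can skip holes
 * that are known to be too small.
 */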
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

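/*
 * Top-down search: scan downward from mmap_base for a HPAGE_SIZE-aligned
 * hole of @len bytes, caching the largest hole seen.  If nothing fits,
 * retry once from the top and finally fall back to the bottom-up search
 * above.
 */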
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & HPAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

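/*
 * Arch hook used by hugetlbfs: check that @len is a multiple of the huge
 * page size, honour MAP_FIXED and a suitably aligned hint address, and
 * otherwise dispatch to the bottom-up or top-down search to match the
 * process's normal mmap layout.
 */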
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/