[deliverable/linux.git] / mm / hugetlb.c
1 /*
2 * Generic hugetlb support.
3 * (C) Nadia Yvette Chambers, April 2004
4 */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/mm.h>
8 #include <linux/seq_file.h>
9 #include <linux/sysctl.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24 #include <linux/page-isolation.h>
25 #include <linux/jhash.h>
26
27 #include <asm/page.h>
28 #include <asm/pgtable.h>
29 #include <asm/tlb.h>
30
31 #include <linux/io.h>
32 #include <linux/hugetlb.h>
33 #include <linux/hugetlb_cgroup.h>
34 #include <linux/node.h>
35 #include "internal.h"
36
37 int hugepages_treat_as_movable;
38
39 int hugetlb_max_hstate __read_mostly;
40 unsigned int default_hstate_idx;
41 struct hstate hstates[HUGE_MAX_HSTATE];
42 /*
43 * Minimum page order among possible hugepage sizes, set to a proper value
44 * at boot time.
45 */
46 static unsigned int minimum_order __read_mostly = UINT_MAX;
47
48 __initdata LIST_HEAD(huge_boot_pages);
49
50 /* for command line parsing */
51 static struct hstate * __initdata parsed_hstate;
52 static unsigned long __initdata default_hstate_max_huge_pages;
53 static unsigned long __initdata default_hstate_size;
54 static bool __initdata parsed_valid_hugepagesz = true;
55
56 /*
57 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58 * free_huge_pages, and surplus_huge_pages.
59 */
60 DEFINE_SPINLOCK(hugetlb_lock);
61
62 /*
63 * Serializes faults on the same logical page. This is used to
64 * prevent spurious OOMs when the hugepage pool is fully utilized.
65 */
66 static int num_fault_mutexes;
67 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
68
69 /* Forward declaration */
70 static int hugetlb_acct_memory(struct hstate *h, long delta);
71
72 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
73 {
74 bool free = (spool->count == 0) && (spool->used_hpages == 0);
75
76 spin_unlock(&spool->lock);
77
78 /* If no pages are used, and no other handles to the subpool
79 * remain, give up any reservations based on minimum size and
80 * free the subpool */
81 if (free) {
82 if (spool->min_hpages != -1)
83 hugetlb_acct_memory(spool->hstate,
84 -spool->min_hpages);
85 kfree(spool);
86 }
87 }
88
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
90 long min_hpages)
91 {
92 struct hugepage_subpool *spool;
93
94 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
95 if (!spool)
96 return NULL;
97
98 spin_lock_init(&spool->lock);
99 spool->count = 1;
100 spool->max_hpages = max_hpages;
101 spool->hstate = h;
102 spool->min_hpages = min_hpages;
103
104 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
105 kfree(spool);
106 return NULL;
107 }
108 spool->rsv_hpages = min_hpages;
109
110 return spool;
111 }
112
113 void hugepage_put_subpool(struct hugepage_subpool *spool)
114 {
115 spin_lock(&spool->lock);
116 BUG_ON(!spool->count);
117 spool->count--;
118 unlock_or_release_subpool(spool);
119 }
120
121 /*
122 * Subpool accounting for allocating and reserving pages.
123 * Return -ENOMEM if there are not enough resources to satisfy the
124 * request. Otherwise, return the number of pages by which the
125 * global pools must be adjusted (upward). The returned value may
126 * only be different than the passed value (delta) in the case where
127 * a subpool minimum size must be maintained.
128 */
129 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
130 long delta)
131 {
132 long ret = delta;
133
134 if (!spool)
135 return ret;
136
137 spin_lock(&spool->lock);
138
139 if (spool->max_hpages != -1) { /* maximum size accounting */
140 if ((spool->used_hpages + delta) <= spool->max_hpages)
141 spool->used_hpages += delta;
142 else {
143 ret = -ENOMEM;
144 goto unlock_ret;
145 }
146 }
147
148 /* minimum size accounting */
149 if (spool->min_hpages != -1 && spool->rsv_hpages) {
150 if (delta > spool->rsv_hpages) {
151 /*
152 * Asking for more reserves than those already taken on
153 * behalf of subpool. Return difference.
154 */
155 ret = delta - spool->rsv_hpages;
156 spool->rsv_hpages = 0;
157 } else {
158 ret = 0; /* reserves already accounted for */
159 spool->rsv_hpages -= delta;
160 }
161 }
162
163 unlock_ret:
164 spin_unlock(&spool->lock);
165 return ret;
166 }
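/*
 * Illustrative sketch (not part of the original source): the arithmetic
 * performed by hugepage_subpool_get_pages() for a hypothetical subpool
 * created with min_hpages = 4 and max_hpages = 8.
 *
 *	initial state:		used_hpages = 0, rsv_hpages = 4
 *
 *	get_pages(spool, 2):	used_hpages = 2, rsv_hpages = 2, returns 0
 *				(both pages come from the subpool reserve,
 *				 so the global pools need no adjustment)
 *
 *	get_pages(spool, 3):	used_hpages = 5, rsv_hpages = 0, returns 1
 *				(2 pages covered by the remaining reserve,
 *				 1 page must be charged to the global pool)
 *
 *	get_pages(spool, 4):	would make used_hpages = 9 > max_hpages,
 *				so -ENOMEM is returned and nothing changes.
 */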
167
168 /*
169 * Subpool accounting for freeing and unreserving pages.
170 * Return the number of global page reservations that must be dropped.
171 * The return value may only be different than the passed value (delta)
172 * in the case where a subpool minimum size must be maintained.
173 */
174 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
175 long delta)
176 {
177 long ret = delta;
178
179 if (!spool)
180 return delta;
181
182 spin_lock(&spool->lock);
183
184 if (spool->max_hpages != -1) /* maximum size accounting */
185 spool->used_hpages -= delta;
186
187 /* minimum size accounting */
188 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
189 if (spool->rsv_hpages + delta <= spool->min_hpages)
190 ret = 0;
191 else
192 ret = spool->rsv_hpages + delta - spool->min_hpages;
193
194 spool->rsv_hpages += delta;
195 if (spool->rsv_hpages > spool->min_hpages)
196 spool->rsv_hpages = spool->min_hpages;
197 }
198
199 /*
200 * If hugetlbfs_put_super couldn't free spool due to an outstanding
201 * quota reference, free it now.
202 */
203 unlock_or_release_subpool(spool);
204
205 return ret;
206 }
207
208 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
209 {
210 return HUGETLBFS_SB(inode->i_sb)->spool;
211 }
212
213 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
214 {
215 return subpool_inode(file_inode(vma->vm_file));
216 }
217
218 /*
219 * Region tracking -- allows tracking of reservations and instantiated pages
220 * across the pages in a mapping.
221 *
222 * The region data structures are embedded into a resv_map and protected
223 * by a resv_map's lock. The set of regions within the resv_map represent
224 * reservations for huge pages, or huge pages that have already been
225 * instantiated within the map. The from and to elements are huge page
226 * indices into the associated mapping. from indicates the starting index
227 * of the region. to represents the first index past the end of the region.
228 *
229 * For example, a file region structure with from == 0 and to == 4 represents
230 * four huge pages in a mapping. It is important to note that the to element
231 * represents the first element past the end of the region. This is used in
232 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
233 *
234 * Interval notation of the form [from, to) will be used to indicate that
235 * the endpoint from is inclusive and to is exclusive.
236 */
237 struct file_region {
238 struct list_head link;
239 long from;
240 long to;
241 };
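/*
 * Illustrative sketch (not part of the original source): how a reserve
 * map might look for a mapping with huge pages reserved or instantiated
 * at indices 0-3 and 6-7.  The arithmetic follows the interval notation
 * described above; the names and values are hypothetical.
 *
 *	struct file_region a = { .from = 0, .to = 4 };	   [0, 4) -> 4 pages
 *	struct file_region b = { .from = 6, .to = 8 };	   [6, 8) -> 2 pages
 *
 *	total pages represented = (4 - 0) + (8 - 6) = 6
 *
 * Both entries would be linked on resv_map->regions in ascending order
 * of their 'from' index.
 */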
242
243 /*
244 * Add the huge page range represented by [f, t) to the reserve
245 * map. In the normal case, existing regions will be expanded
246 * to accommodate the specified range. Sufficient regions should
247 * exist for expansion due to the previous call to region_chg
248 * with the same range. However, it is possible that region_del
249 * could have been called after region_chg and modified the map
250 * in such a way that no region exists to be expanded. In this
251 * case, pull a region descriptor from the cache associated with
252 * the map and use that for the new range.
253 *
254 * Return the number of new huge pages added to the map. This
255 * number is greater than or equal to zero.
256 */
257 static long region_add(struct resv_map *resv, long f, long t)
258 {
259 struct list_head *head = &resv->regions;
260 struct file_region *rg, *nrg, *trg;
261 long add = 0;
262
263 spin_lock(&resv->lock);
264 /* Locate the region we are either in or before. */
265 list_for_each_entry(rg, head, link)
266 if (f <= rg->to)
267 break;
268
269 /*
270 * If no region exists which can be expanded to include the
271 * specified range, the list must have been modified by an
272 * interleaving call to region_del(). Pull a region descriptor
273 * from the cache and use it for this range.
274 */
275 if (&rg->link == head || t < rg->from) {
276 VM_BUG_ON(resv->region_cache_count <= 0);
277
278 resv->region_cache_count--;
279 nrg = list_first_entry(&resv->region_cache, struct file_region,
280 link);
281 list_del(&nrg->link);
282
283 nrg->from = f;
284 nrg->to = t;
285 list_add(&nrg->link, rg->link.prev);
286
287 add += t - f;
288 goto out_locked;
289 }
290
291 /* Round our left edge to the current segment if it encloses us. */
292 if (f > rg->from)
293 f = rg->from;
294
295 /* Check for and consume any regions we now overlap with. */
296 nrg = rg;
297 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
298 if (&rg->link == head)
299 break;
300 if (rg->from > t)
301 break;
302
303 /* If this area reaches higher then extend our area to
304 * include it completely. If this is not the first area
305 * which we intend to reuse, free it. */
306 if (rg->to > t)
307 t = rg->to;
308 if (rg != nrg) {
309 /* Decrement return value by the deleted range.
310 * Another range will span this area so that by
311 * end of routine add will be >= zero
312 */
313 add -= (rg->to - rg->from);
314 list_del(&rg->link);
315 kfree(rg);
316 }
317 }
318
319 add += (nrg->from - f); /* Added to beginning of region */
320 nrg->from = f;
321 add += t - nrg->to; /* Added to end of region */
322 nrg->to = t;
323
324 out_locked:
325 resv->adds_in_progress--;
326 spin_unlock(&resv->lock);
327 VM_BUG_ON(add < 0);
328 return add;
329 }
330
331 /*
332 * Examine the existing reserve map and determine how many
333 * huge pages in the specified range [f, t) are NOT currently
334 * represented. This routine is called before a subsequent
335 * call to region_add that will actually modify the reserve
336 * map to add the specified range [f, t). region_chg does
337 * not change the number of huge pages represented by the
338 * map. However, if the existing regions in the map can not
339 * be expanded to represent the new range, a new file_region
340 * structure is added to the map as a placeholder. This is
341 * so that the subsequent region_add call will have all the
342 * regions it needs and will not fail.
343 *
344 * Upon entry, region_chg will also examine the cache of region descriptors
345 * associated with the map. If there are not enough descriptors cached, one
346 * will be allocated for the in progress add operation.
347 *
348 * Returns the number of huge pages that need to be added to the existing
349 * reservation map for the range [f, t). This number is greater or equal to
350 * zero. -ENOMEM is returned if a new file_region structure or cache entry
351 * is needed and can not be allocated.
352 */
353 static long region_chg(struct resv_map *resv, long f, long t)
354 {
355 struct list_head *head = &resv->regions;
356 struct file_region *rg, *nrg = NULL;
357 long chg = 0;
358
359 retry:
360 spin_lock(&resv->lock);
361 retry_locked:
362 resv->adds_in_progress++;
363
364 /*
365 * Check for sufficient descriptors in the cache to accommodate
366 * the number of in progress add operations.
367 */
368 if (resv->adds_in_progress > resv->region_cache_count) {
369 struct file_region *trg;
370
371 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
372 /* Must drop lock to allocate a new descriptor. */
373 resv->adds_in_progress--;
374 spin_unlock(&resv->lock);
375
376 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
377 if (!trg) {
378 kfree(nrg);
379 return -ENOMEM;
380 }
381
382 spin_lock(&resv->lock);
383 list_add(&trg->link, &resv->region_cache);
384 resv->region_cache_count++;
385 goto retry_locked;
386 }
387
388 /* Locate the region we are before or in. */
389 list_for_each_entry(rg, head, link)
390 if (f <= rg->to)
391 break;
392
393 /* If we are below the current region then a new region is required.
394 * Subtle, allocate a new region at the position but make it zero
395 * size such that we can guarantee to record the reservation. */
396 if (&rg->link == head || t < rg->from) {
397 if (!nrg) {
398 resv->adds_in_progress--;
399 spin_unlock(&resv->lock);
400 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
401 if (!nrg)
402 return -ENOMEM;
403
404 nrg->from = f;
405 nrg->to = f;
406 INIT_LIST_HEAD(&nrg->link);
407 goto retry;
408 }
409
410 list_add(&nrg->link, rg->link.prev);
411 chg = t - f;
412 goto out_nrg;
413 }
414
415 /* Round our left edge to the current segment if it encloses us. */
416 if (f > rg->from)
417 f = rg->from;
418 chg = t - f;
419
420 /* Check for and consume any regions we now overlap with. */
421 list_for_each_entry(rg, rg->link.prev, link) {
422 if (&rg->link == head)
423 break;
424 if (rg->from > t)
425 goto out;
426
427 /* We overlap with this area, if it extends further than
428 * us then we must extend ourselves. Account for its
429 * existing reservation. */
430 if (rg->to > t) {
431 chg += rg->to - t;
432 t = rg->to;
433 }
434 chg -= rg->to - rg->from;
435 }
436
437 out:
438 spin_unlock(&resv->lock);
439 /* We already know we raced and no longer need the new region */
440 kfree(nrg);
441 return chg;
442 out_nrg:
443 spin_unlock(&resv->lock);
444 return chg;
445 }
446
447 /*
448 * Abort the in progress add operation. The adds_in_progress field
449 * of the resv_map keeps track of the operations in progress between
450 * calls to region_chg and region_add. Operations are sometimes
451 * aborted after the call to region_chg. In such cases, region_abort
452 * is called to decrement the adds_in_progress counter.
453 *
454 * NOTE: The range arguments [f, t) are not needed or used in this
455 * routine. They are kept to make reading the calling code easier as
456 * arguments will match the associated region_chg call.
457 */
458 static void region_abort(struct resv_map *resv, long f, long t)
459 {
460 spin_lock(&resv->lock);
461 VM_BUG_ON(!resv->region_cache_count);
462 resv->adds_in_progress--;
463 spin_unlock(&resv->lock);
464 }
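/*
 * Illustrative sketch (not part of the original source): the calling
 * protocol tying region_chg(), region_add() and region_abort() together.
 * Locking and most error handling are omitted; the variable names are
 * hypothetical.
 *
 *	chg = region_chg(resv, idx, idx + 1);	   reserve a descriptor and
 *						   count pages to be added
 *	if (chg < 0)
 *		return chg;			   allocation failure
 *
 *	if (the huge page allocation succeeds)
 *		add = region_add(resv, idx, idx + 1);  commit the range
 *	else
 *		region_abort(resv, idx, idx + 1);      drop adds_in_progress
 *
 * This is the pattern used by __vma_reservation_common() further down
 * in this file.
 */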
465
466 /*
467 * Delete the specified range [f, t) from the reserve map. If the
468 * t parameter is LONG_MAX, this indicates that ALL regions after f
469 * should be deleted. Locate the regions which intersect [f, t)
470 * and either trim, delete or split the existing regions.
471 *
472 * Returns the number of huge pages deleted from the reserve map.
473 * In the normal case, the return value is zero or more. In the
474 * case where a region must be split, a new region descriptor must
475 * be allocated. If the allocation fails, -ENOMEM will be returned.
476 * NOTE: If the parameter t == LONG_MAX, then we will never split
477 * a region and possibly return -ENOMEM. Callers specifying
478 * t == LONG_MAX do not need to check for -ENOMEM error.
479 */
480 static long region_del(struct resv_map *resv, long f, long t)
481 {
482 struct list_head *head = &resv->regions;
483 struct file_region *rg, *trg;
484 struct file_region *nrg = NULL;
485 long del = 0;
486
487 retry:
488 spin_lock(&resv->lock);
489 list_for_each_entry_safe(rg, trg, head, link) {
490 /*
491 * Skip regions before the range to be deleted. file_region
492 * ranges are normally of the form [from, to). However, there
493 * may be a "placeholder" entry in the map which is of the form
494 * (from, to) with from == to. Check for placeholder entries
495 * at the beginning of the range to be deleted.
496 */
497 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
498 continue;
499
500 if (rg->from >= t)
501 break;
502
503 if (f > rg->from && t < rg->to) { /* Must split region */
504 /*
505 * Check for an entry in the cache before dropping
506 * lock and attempting allocation.
507 */
508 if (!nrg &&
509 resv->region_cache_count > resv->adds_in_progress) {
510 nrg = list_first_entry(&resv->region_cache,
511 struct file_region,
512 link);
513 list_del(&nrg->link);
514 resv->region_cache_count--;
515 }
516
517 if (!nrg) {
518 spin_unlock(&resv->lock);
519 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
520 if (!nrg)
521 return -ENOMEM;
522 goto retry;
523 }
524
525 del += t - f;
526
527 /* New entry for end of split region */
528 nrg->from = t;
529 nrg->to = rg->to;
530 INIT_LIST_HEAD(&nrg->link);
531
532 /* Original entry is trimmed */
533 rg->to = f;
534
535 list_add(&nrg->link, &rg->link);
536 nrg = NULL;
537 break;
538 }
539
540 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
541 del += rg->to - rg->from;
542 list_del(&rg->link);
543 kfree(rg);
544 continue;
545 }
546
547 if (f <= rg->from) { /* Trim beginning of region */
548 del += t - rg->from;
549 rg->from = t;
550 } else { /* Trim end of region */
551 del += rg->to - f;
552 rg->to = f;
553 }
554 }
555
556 spin_unlock(&resv->lock);
557 kfree(nrg);
558 return del;
559 }
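/*
 * Illustrative sketch (not part of the original source): region_del()
 * applied to a reserve map holding a single region [0, 10).  Each line
 * shows one hypothetical call and its effect.
 *
 *	region_del(resv, 0, 10)	-> removes the entry, returns 10
 *	region_del(resv, 0, 4)	-> trims it to [4, 10), returns 4
 *	region_del(resv, 8, 10)	-> trims it to [0, 8), returns 2
 *	region_del(resv, 3, 6)	-> splits it into [0, 3) and [6, 10),
 *				   returns 3 (may return -ENOMEM if no
 *				   descriptor is available for the split)
 */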
560
561 /*
562 * A rare out of memory error was encountered which prevented removal of
563 * the reserve map region for a page. The huge page itself was freed
564 * and removed from the page cache. This routine will adjust the subpool
565 * usage count, and the global reserve count if needed. By incrementing
566 * these counts, the reserve map entry which could not be deleted will
567 * appear as a "reserved" entry instead of simply dangling with incorrect
568 * counts.
569 */
570 void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
571 {
572 struct hugepage_subpool *spool = subpool_inode(inode);
573 long rsv_adjust;
574
575 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
576 if (restore_reserve && rsv_adjust) {
577 struct hstate *h = hstate_inode(inode);
578
579 hugetlb_acct_memory(h, 1);
580 }
581 }
582
583 /*
584 * Count and return the number of huge pages in the reserve map
585 * that intersect with the range [f, t).
586 */
587 static long region_count(struct resv_map *resv, long f, long t)
588 {
589 struct list_head *head = &resv->regions;
590 struct file_region *rg;
591 long chg = 0;
592
593 spin_lock(&resv->lock);
594 /* Locate each segment we overlap with, and count that overlap. */
595 list_for_each_entry(rg, head, link) {
596 long seg_from;
597 long seg_to;
598
599 if (rg->to <= f)
600 continue;
601 if (rg->from >= t)
602 break;
603
604 seg_from = max(rg->from, f);
605 seg_to = min(rg->to, t);
606
607 chg += seg_to - seg_from;
608 }
609 spin_unlock(&resv->lock);
610
611 return chg;
612 }
613
614 /*
615 * Convert the address within this vma to the page offset within
616 * the mapping, in pagecache page units; huge pages here.
617 */
618 static pgoff_t vma_hugecache_offset(struct hstate *h,
619 struct vm_area_struct *vma, unsigned long address)
620 {
621 return ((address - vma->vm_start) >> huge_page_shift(h)) +
622 (vma->vm_pgoff >> huge_page_order(h));
623 }
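/*
 * Illustrative sketch (not part of the original source): the offset
 * arithmetic above for a hypothetical 2MB huge page VMA.
 *
 *	huge_page_shift(h) = 21, huge_page_order(h) = 9 (2MB pages)
 *	vma->vm_start = 0x40000000, vma->vm_pgoff = 512 (file offset 2MB)
 *	address = 0x40400000 (4MB into the VMA)
 *
 *	((0x40400000 - 0x40000000) >> 21) + (512 >> 9) = 2 + 1 = 3
 *
 * i.e. the address maps to the fourth huge page of the backing file.
 */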
624
625 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
626 unsigned long address)
627 {
628 return vma_hugecache_offset(hstate_vma(vma), vma, address);
629 }
630
631 /*
632 * Return the size of the pages allocated when backing a VMA. In the majority
633 * of cases this will be the same size as that used by the page table entries.
634 */
635 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
636 {
637 struct hstate *hstate;
638
639 if (!is_vm_hugetlb_page(vma))
640 return PAGE_SIZE;
641
642 hstate = hstate_vma(vma);
643
644 return 1UL << huge_page_shift(hstate);
645 }
646 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
647
648 /*
649 * Return the page size being used by the MMU to back a VMA. In the majority
650 * of cases, the page size used by the kernel matches the MMU size. On
651 * architectures where it differs, an architecture-specific version of this
652 * function is required.
653 */
654 #ifndef vma_mmu_pagesize
655 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
656 {
657 return vma_kernel_pagesize(vma);
658 }
659 #endif
660
661 /*
662 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
663 * bits of the reservation map pointer, which are always clear due to
664 * alignment.
665 */
666 #define HPAGE_RESV_OWNER (1UL << 0)
667 #define HPAGE_RESV_UNMAPPED (1UL << 1)
668 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
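/*
 * Illustrative sketch (not part of the original source): how the flags
 * above share vm_private_data with the resv_map pointer.  Because the
 * resv_map is allocated with kmalloc() it is at least pointer-aligned,
 * so its two low bits are always zero and can carry the flags.
 *
 *	vm_private_data = (unsigned long)resv_map | HPAGE_RESV_OWNER;
 *
 *	map   = (struct resv_map *)(vm_private_data & ~HPAGE_RESV_MASK);
 *	owner = vm_private_data & HPAGE_RESV_OWNER;
 *
 * set_vma_resv_map(), set_vma_resv_flags() and is_vma_resv_set() below
 * implement this packing.
 */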
669
670 /*
671 * These helpers are used to track how many pages are reserved for
672 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
673 * is guaranteed to have their future faults succeed.
674 *
675 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
676 * the reserve counters are updated with the hugetlb_lock held. It is safe
677 * to reset the VMA at fork() time as it is not in use yet and there is no
678 * chance of the global counters getting corrupted as a result of the values.
679 *
680 * The private mapping reservation is represented in a subtly different
681 * manner to a shared mapping. A shared mapping has a region map associated
682 * with the underlying file, this region map represents the backing file
683 * pages which have ever had a reservation assigned; this persists even
684 * after the page is instantiated. A private mapping has a region map
685 * associated with the original mmap which is attached to all VMAs which
686 * reference it, this region map represents those offsets which have consumed
687 * a reservation, i.e. where pages have been instantiated.
688 */
689 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
690 {
691 return (unsigned long)vma->vm_private_data;
692 }
693
694 static void set_vma_private_data(struct vm_area_struct *vma,
695 unsigned long value)
696 {
697 vma->vm_private_data = (void *)value;
698 }
699
700 struct resv_map *resv_map_alloc(void)
701 {
702 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
703 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
704
705 if (!resv_map || !rg) {
706 kfree(resv_map);
707 kfree(rg);
708 return NULL;
709 }
710
711 kref_init(&resv_map->refs);
712 spin_lock_init(&resv_map->lock);
713 INIT_LIST_HEAD(&resv_map->regions);
714
715 resv_map->adds_in_progress = 0;
716
717 INIT_LIST_HEAD(&resv_map->region_cache);
718 list_add(&rg->link, &resv_map->region_cache);
719 resv_map->region_cache_count = 1;
720
721 return resv_map;
722 }
723
724 void resv_map_release(struct kref *ref)
725 {
726 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
727 struct list_head *head = &resv_map->region_cache;
728 struct file_region *rg, *trg;
729
730 /* Clear out any active regions before we release the map. */
731 region_del(resv_map, 0, LONG_MAX);
732
733 /* ... and any entries left in the cache */
734 list_for_each_entry_safe(rg, trg, head, link) {
735 list_del(&rg->link);
736 kfree(rg);
737 }
738
739 VM_BUG_ON(resv_map->adds_in_progress);
740
741 kfree(resv_map);
742 }
743
744 static inline struct resv_map *inode_resv_map(struct inode *inode)
745 {
746 return inode->i_mapping->private_data;
747 }
748
749 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
750 {
751 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
752 if (vma->vm_flags & VM_MAYSHARE) {
753 struct address_space *mapping = vma->vm_file->f_mapping;
754 struct inode *inode = mapping->host;
755
756 return inode_resv_map(inode);
757
758 } else {
759 return (struct resv_map *)(get_vma_private_data(vma) &
760 ~HPAGE_RESV_MASK);
761 }
762 }
763
764 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
765 {
766 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
767 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
768
769 set_vma_private_data(vma, (get_vma_private_data(vma) &
770 HPAGE_RESV_MASK) | (unsigned long)map);
771 }
772
773 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
774 {
775 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
776 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
777
778 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
779 }
780
781 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
782 {
783 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
784
785 return (get_vma_private_data(vma) & flag) != 0;
786 }
787
788 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
789 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
790 {
791 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
792 if (!(vma->vm_flags & VM_MAYSHARE))
793 vma->vm_private_data = (void *)0;
794 }
795
796 /* Returns true if the VMA has associated reserve pages */
797 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
798 {
799 if (vma->vm_flags & VM_NORESERVE) {
800 /*
801 * This address is already reserved by another process (chg == 0),
802 * so, we should decrement reserved count. Without decrementing,
803 * reserve count remains after releasing inode, because this
804 * allocated page will go into page cache and is regarded as
805 * coming from reserved pool in releasing step. Currently, we
806 * don't have any other solution to deal with this situation
807 * properly, so add work-around here.
808 */
809 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
810 return true;
811 else
812 return false;
813 }
814
815 /* Shared mappings always use reserves */
816 if (vma->vm_flags & VM_MAYSHARE) {
817 /*
818 * We know VM_NORESERVE is not set. Therefore, there SHOULD
819 * be a region map for all pages. The only situation where
820 * there is no region map is if a hole was punched via
821 * fallocate. In this case, there really are no reserves to
822 * use. This situation is indicated if chg != 0.
823 */
824 if (chg)
825 return false;
826 else
827 return true;
828 }
829
830 /*
831 * Only the process that called mmap() has reserves for
832 * private mappings.
833 */
834 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
835 return true;
836
837 return false;
838 }
839
840 static void enqueue_huge_page(struct hstate *h, struct page *page)
841 {
842 int nid = page_to_nid(page);
843 list_move(&page->lru, &h->hugepage_freelists[nid]);
844 h->free_huge_pages++;
845 h->free_huge_pages_node[nid]++;
846 }
847
848 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
849 {
850 struct page *page;
851
852 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
853 if (!is_migrate_isolate_page(page))
854 break;
855 /*
856 * if a 'non-isolated free hugepage' is not found on the list,
857 * the allocation fails.
858 */
859 if (&h->hugepage_freelists[nid] == &page->lru)
860 return NULL;
861 list_move(&page->lru, &h->hugepage_activelist);
862 set_page_refcounted(page);
863 h->free_huge_pages--;
864 h->free_huge_pages_node[nid]--;
865 return page;
866 }
867
868 /* Movability of hugepages depends on migration support. */
869 static inline gfp_t htlb_alloc_mask(struct hstate *h)
870 {
871 if (hugepages_treat_as_movable || hugepage_migration_supported(h))
872 return GFP_HIGHUSER_MOVABLE;
873 else
874 return GFP_HIGHUSER;
875 }
876
877 static struct page *dequeue_huge_page_vma(struct hstate *h,
878 struct vm_area_struct *vma,
879 unsigned long address, int avoid_reserve,
880 long chg)
881 {
882 struct page *page = NULL;
883 struct mempolicy *mpol;
884 nodemask_t *nodemask;
885 struct zonelist *zonelist;
886 struct zone *zone;
887 struct zoneref *z;
888 unsigned int cpuset_mems_cookie;
889
890 /*
891 * A child process with MAP_PRIVATE mappings created by its parent
892 * has no page reserves. This check ensures that reservations are
893 * not "stolen". The child may still get SIGKILLed
894 */
895 if (!vma_has_reserves(vma, chg) &&
896 h->free_huge_pages - h->resv_huge_pages == 0)
897 goto err;
898
899 /* If reserves cannot be used, ensure enough pages are in the pool */
900 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
901 goto err;
902
903 retry_cpuset:
904 cpuset_mems_cookie = read_mems_allowed_begin();
905 zonelist = huge_zonelist(vma, address,
906 htlb_alloc_mask(h), &mpol, &nodemask);
907
908 for_each_zone_zonelist_nodemask(zone, z, zonelist,
909 MAX_NR_ZONES - 1, nodemask) {
910 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
911 page = dequeue_huge_page_node(h, zone_to_nid(zone));
912 if (page) {
913 if (avoid_reserve)
914 break;
915 if (!vma_has_reserves(vma, chg))
916 break;
917
918 SetPagePrivate(page);
919 h->resv_huge_pages--;
920 break;
921 }
922 }
923 }
924
925 mpol_cond_put(mpol);
926 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
927 goto retry_cpuset;
928 return page;
929
930 err:
931 return NULL;
932 }
933
934 /*
935 * common helper functions for hstate_next_node_to_{alloc|free}.
936 * We may have allocated or freed a huge page based on a different
937 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
938 * be outside of *nodes_allowed. Ensure that we use an allowed
939 * node for alloc or free.
940 */
941 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
942 {
943 nid = next_node_in(nid, *nodes_allowed);
944 VM_BUG_ON(nid >= MAX_NUMNODES);
945
946 return nid;
947 }
948
949 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
950 {
951 if (!node_isset(nid, *nodes_allowed))
952 nid = next_node_allowed(nid, nodes_allowed);
953 return nid;
954 }
955
956 /*
957 * returns the previously saved node ["this node"] from which to
958 * allocate a persistent huge page for the pool and advance the
959 * next node from which to allocate, handling wrap at end of node
960 * mask.
961 */
962 static int hstate_next_node_to_alloc(struct hstate *h,
963 nodemask_t *nodes_allowed)
964 {
965 int nid;
966
967 VM_BUG_ON(!nodes_allowed);
968
969 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
970 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
971
972 return nid;
973 }
974
975 /*
976 * helper for free_pool_huge_page() - return the previously saved
977 * node ["this node"] from which to free a huge page. Advance the
978 * next node id whether or not we find a free huge page to free so
979 * that the next attempt to free addresses the next node.
980 */
981 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
982 {
983 int nid;
984
985 VM_BUG_ON(!nodes_allowed);
986
987 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
988 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
989
990 return nid;
991 }
992
993 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
994 for (nr_nodes = nodes_weight(*mask); \
995 nr_nodes > 0 && \
996 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
997 nr_nodes--)
998
999 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1000 for (nr_nodes = nodes_weight(*mask); \
1001 nr_nodes > 0 && \
1002 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1003 nr_nodes--)
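/*
 * Illustrative sketch (not part of the original source): typical use of
 * the iteration macros above.  'h' and 'nodes_allowed' are assumed to be
 * in scope, as in the callers later in this file; try_to_allocate_on()
 * is a hypothetical helper.
 *
 *	int nr_nodes, node;
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		if (try_to_allocate_on(node))
 *			break;
 *	}
 *
 * At most nodes_weight(*nodes_allowed) nodes are visited, starting from
 * the hstate's saved next_nid_to_alloc and wrapping around the mask.
 */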
1004
1005 #if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
1006 static void destroy_compound_gigantic_page(struct page *page,
1007 unsigned int order)
1008 {
1009 int i;
1010 int nr_pages = 1 << order;
1011 struct page *p = page + 1;
1012
1013 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1014 clear_compound_head(p);
1015 set_page_refcounted(p);
1016 }
1017
1018 set_compound_order(page, 0);
1019 __ClearPageHead(page);
1020 }
1021
1022 static void free_gigantic_page(struct page *page, unsigned int order)
1023 {
1024 free_contig_range(page_to_pfn(page), 1 << order);
1025 }
1026
1027 static int __alloc_gigantic_page(unsigned long start_pfn,
1028 unsigned long nr_pages)
1029 {
1030 unsigned long end_pfn = start_pfn + nr_pages;
1031 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1032 }
1033
1034 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
1035 unsigned long nr_pages)
1036 {
1037 unsigned long i, end_pfn = start_pfn + nr_pages;
1038 struct page *page;
1039
1040 for (i = start_pfn; i < end_pfn; i++) {
1041 if (!pfn_valid(i))
1042 return false;
1043
1044 page = pfn_to_page(i);
1045
1046 if (PageReserved(page))
1047 return false;
1048
1049 if (page_count(page) > 0)
1050 return false;
1051
1052 if (PageHuge(page))
1053 return false;
1054 }
1055
1056 return true;
1057 }
1058
1059 static bool zone_spans_last_pfn(const struct zone *zone,
1060 unsigned long start_pfn, unsigned long nr_pages)
1061 {
1062 unsigned long last_pfn = start_pfn + nr_pages - 1;
1063 return zone_spans_pfn(zone, last_pfn);
1064 }
1065
1066 static struct page *alloc_gigantic_page(int nid, unsigned int order)
1067 {
1068 unsigned long nr_pages = 1 << order;
1069 unsigned long ret, pfn, flags;
1070 struct zone *z;
1071
1072 z = NODE_DATA(nid)->node_zones;
1073 for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1074 spin_lock_irqsave(&z->lock, flags);
1075
1076 pfn = ALIGN(z->zone_start_pfn, nr_pages);
1077 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1078 if (pfn_range_valid_gigantic(pfn, nr_pages)) {
1079 /*
1080 * We release the zone lock here because
1081 * alloc_contig_range() will also lock the zone
1082 * at some point. If there's an allocation
1083 * spinning on this lock, it may win the race
1084 * and cause alloc_contig_range() to fail...
1085 */
1086 spin_unlock_irqrestore(&z->lock, flags);
1087 ret = __alloc_gigantic_page(pfn, nr_pages);
1088 if (!ret)
1089 return pfn_to_page(pfn);
1090 spin_lock_irqsave(&z->lock, flags);
1091 }
1092 pfn += nr_pages;
1093 }
1094
1095 spin_unlock_irqrestore(&z->lock, flags);
1096 }
1097
1098 return NULL;
1099 }
1100
1101 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1102 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1103
1104 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1105 {
1106 struct page *page;
1107
1108 page = alloc_gigantic_page(nid, huge_page_order(h));
1109 if (page) {
1110 prep_compound_gigantic_page(page, huge_page_order(h));
1111 prep_new_huge_page(h, page, nid);
1112 }
1113
1114 return page;
1115 }
1116
1117 static int alloc_fresh_gigantic_page(struct hstate *h,
1118 nodemask_t *nodes_allowed)
1119 {
1120 struct page *page = NULL;
1121 int nr_nodes, node;
1122
1123 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1124 page = alloc_fresh_gigantic_page_node(h, node);
1125 if (page)
1126 return 1;
1127 }
1128
1129 return 0;
1130 }
1131
1132 static inline bool gigantic_page_supported(void) { return true; }
1133 #else
1134 static inline bool gigantic_page_supported(void) { return false; }
1135 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1136 static inline void destroy_compound_gigantic_page(struct page *page,
1137 unsigned int order) { }
1138 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1139 nodemask_t *nodes_allowed) { return 0; }
1140 #endif
1141
1142 static void update_and_free_page(struct hstate *h, struct page *page)
1143 {
1144 int i;
1145
1146 if (hstate_is_gigantic(h) && !gigantic_page_supported())
1147 return;
1148
1149 h->nr_huge_pages--;
1150 h->nr_huge_pages_node[page_to_nid(page)]--;
1151 for (i = 0; i < pages_per_huge_page(h); i++) {
1152 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1153 1 << PG_referenced | 1 << PG_dirty |
1154 1 << PG_active | 1 << PG_private |
1155 1 << PG_writeback);
1156 }
1157 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1158 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1159 set_page_refcounted(page);
1160 if (hstate_is_gigantic(h)) {
1161 destroy_compound_gigantic_page(page, huge_page_order(h));
1162 free_gigantic_page(page, huge_page_order(h));
1163 } else {
1164 __free_pages(page, huge_page_order(h));
1165 }
1166 }
1167
1168 struct hstate *size_to_hstate(unsigned long size)
1169 {
1170 struct hstate *h;
1171
1172 for_each_hstate(h) {
1173 if (huge_page_size(h) == size)
1174 return h;
1175 }
1176 return NULL;
1177 }
1178
1179 /*
1180 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1181 * to hstate->hugepage_activelist.)
1182 *
1183 * This function can be called for tail pages, but never returns true for them.
1184 */
1185 bool page_huge_active(struct page *page)
1186 {
1187 VM_BUG_ON_PAGE(!PageHuge(page), page);
1188 return PageHead(page) && PagePrivate(&page[1]);
1189 }
1190
1191 /* never called for tail page */
1192 static void set_page_huge_active(struct page *page)
1193 {
1194 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1195 SetPagePrivate(&page[1]);
1196 }
1197
1198 static void clear_page_huge_active(struct page *page)
1199 {
1200 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1201 ClearPagePrivate(&page[1]);
1202 }
1203
1204 void free_huge_page(struct page *page)
1205 {
1206 /*
1207 * Can't pass hstate in here because it is called from the
1208 * compound page destructor.
1209 */
1210 struct hstate *h = page_hstate(page);
1211 int nid = page_to_nid(page);
1212 struct hugepage_subpool *spool =
1213 (struct hugepage_subpool *)page_private(page);
1214 bool restore_reserve;
1215
1216 set_page_private(page, 0);
1217 page->mapping = NULL;
1218 VM_BUG_ON_PAGE(page_count(page), page);
1219 VM_BUG_ON_PAGE(page_mapcount(page), page);
1220 restore_reserve = PagePrivate(page);
1221 ClearPagePrivate(page);
1222
1223 /*
1224 * A return code of zero implies that the subpool will be under its
1225 * minimum size if the reservation is not restored after the page is freed.
1226 * Therefore, force restore_reserve operation.
1227 */
1228 if (hugepage_subpool_put_pages(spool, 1) == 0)
1229 restore_reserve = true;
1230
1231 spin_lock(&hugetlb_lock);
1232 clear_page_huge_active(page);
1233 hugetlb_cgroup_uncharge_page(hstate_index(h),
1234 pages_per_huge_page(h), page);
1235 if (restore_reserve)
1236 h->resv_huge_pages++;
1237
1238 if (h->surplus_huge_pages_node[nid]) {
1239 /* remove the page from active list */
1240 list_del(&page->lru);
1241 update_and_free_page(h, page);
1242 h->surplus_huge_pages--;
1243 h->surplus_huge_pages_node[nid]--;
1244 } else {
1245 arch_clear_hugepage_flags(page);
1246 enqueue_huge_page(h, page);
1247 }
1248 spin_unlock(&hugetlb_lock);
1249 }
1250
1251 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1252 {
1253 INIT_LIST_HEAD(&page->lru);
1254 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1255 spin_lock(&hugetlb_lock);
1256 set_hugetlb_cgroup(page, NULL);
1257 h->nr_huge_pages++;
1258 h->nr_huge_pages_node[nid]++;
1259 spin_unlock(&hugetlb_lock);
1260 put_page(page); /* free it into the hugepage allocator */
1261 }
1262
1263 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1264 {
1265 int i;
1266 int nr_pages = 1 << order;
1267 struct page *p = page + 1;
1268
1269 /* we rely on prep_new_huge_page to set the destructor */
1270 set_compound_order(page, order);
1271 __ClearPageReserved(page);
1272 __SetPageHead(page);
1273 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1274 /*
1275 * For gigantic hugepages allocated through bootmem at
1276 * boot, it's safer to be consistent with the not-gigantic
1277 * hugepages and clear the PG_reserved bit from all tail pages
1278 * too. Otherwise drivers using get_user_pages() to access tail
1279 * pages may get the reference counting wrong if they see
1280 * PG_reserved set on a tail page (despite the head page not
1281 * having PG_reserved set). Enforcing this consistency between
1282 * head and tail pages allows drivers to optimize away a check
1283 * on the head page when they need to know if put_page() is needed
1284 * after get_user_pages().
1285 */
1286 __ClearPageReserved(p);
1287 set_page_count(p, 0);
1288 set_compound_head(p, page);
1289 }
1290 atomic_set(compound_mapcount_ptr(page), -1);
1291 }
1292
1293 /*
1294 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1295 * transparent huge pages. See the PageTransHuge() documentation for more
1296 * details.
1297 */
1298 int PageHuge(struct page *page)
1299 {
1300 if (!PageCompound(page))
1301 return 0;
1302
1303 page = compound_head(page);
1304 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1305 }
1306 EXPORT_SYMBOL_GPL(PageHuge);
1307
1308 /*
1309 * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
1310 * normal or transparent huge pages.
1311 */
1312 int PageHeadHuge(struct page *page_head)
1313 {
1314 if (!PageHead(page_head))
1315 return 0;
1316
1317 return get_compound_page_dtor(page_head) == free_huge_page;
1318 }
1319
1320 pgoff_t __basepage_index(struct page *page)
1321 {
1322 struct page *page_head = compound_head(page);
1323 pgoff_t index = page_index(page_head);
1324 unsigned long compound_idx;
1325
1326 if (!PageHuge(page_head))
1327 return page_index(page);
1328
1329 if (compound_order(page_head) >= MAX_ORDER)
1330 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1331 else
1332 compound_idx = page - page_head;
1333
1334 return (index << compound_order(page_head)) + compound_idx;
1335 }
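/*
 * Illustrative sketch (not part of the original source): the index
 * arithmetic above for a base page inside a 2MB huge page (order 9).
 * The values are hypothetical.
 *
 *	page_index(page_head) = 5	   head page is the sixth huge page
 *	page - page_head      = 17	   17th base page within it
 *
 *	__basepage_index() = (5 << 9) + 17 = 2577
 *
 * i.e. the result is expressed in base (4KB) page units.  The pfn-based
 * branch handles gigantic pages, whose tail struct pages may not be
 * virtually contiguous in the mem_map.
 */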
1336
1337 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1338 {
1339 struct page *page;
1340
1341 page = __alloc_pages_node(nid,
1342 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1343 __GFP_REPEAT|__GFP_NOWARN,
1344 huge_page_order(h));
1345 if (page) {
1346 prep_new_huge_page(h, page, nid);
1347 }
1348
1349 return page;
1350 }
1351
1352 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1353 {
1354 struct page *page;
1355 int nr_nodes, node;
1356 int ret = 0;
1357
1358 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1359 page = alloc_fresh_huge_page_node(h, node);
1360 if (page) {
1361 ret = 1;
1362 break;
1363 }
1364 }
1365
1366 if (ret)
1367 count_vm_event(HTLB_BUDDY_PGALLOC);
1368 else
1369 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1370
1371 return ret;
1372 }
1373
1374 /*
1375 * Free huge page from pool from next node to free.
1376 * Attempt to keep persistent huge pages more or less
1377 * balanced over allowed nodes.
1378 * Called with hugetlb_lock locked.
1379 */
1380 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1381 bool acct_surplus)
1382 {
1383 int nr_nodes, node;
1384 int ret = 0;
1385
1386 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1387 /*
1388 * If we're returning unused surplus pages, only examine
1389 * nodes with surplus pages.
1390 */
1391 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1392 !list_empty(&h->hugepage_freelists[node])) {
1393 struct page *page =
1394 list_entry(h->hugepage_freelists[node].next,
1395 struct page, lru);
1396 list_del(&page->lru);
1397 h->free_huge_pages--;
1398 h->free_huge_pages_node[node]--;
1399 if (acct_surplus) {
1400 h->surplus_huge_pages--;
1401 h->surplus_huge_pages_node[node]--;
1402 }
1403 update_and_free_page(h, page);
1404 ret = 1;
1405 break;
1406 }
1407 }
1408
1409 return ret;
1410 }
1411
1412 /*
1413 * Dissolve a given free hugepage into free buddy pages. This function does
1414 * nothing for in-use (including surplus) hugepages.
1415 */
1416 static void dissolve_free_huge_page(struct page *page)
1417 {
1418 spin_lock(&hugetlb_lock);
1419 if (PageHuge(page) && !page_count(page)) {
1420 struct hstate *h = page_hstate(page);
1421 int nid = page_to_nid(page);
1422 list_del(&page->lru);
1423 h->free_huge_pages--;
1424 h->free_huge_pages_node[nid]--;
1425 update_and_free_page(h, page);
1426 }
1427 spin_unlock(&hugetlb_lock);
1428 }
1429
1430 /*
1431 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1432 * make specified memory blocks removable from the system.
1433 * Note that start_pfn should be aligned with the (minimum) hugepage size.
1434 */
1435 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1436 {
1437 unsigned long pfn;
1438
1439 if (!hugepages_supported())
1440 return;
1441
1442 VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1443 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1444 dissolve_free_huge_page(pfn_to_page(pfn));
1445 }
1446
1447 /*
1448 * There are 3 ways this can get called:
1449 * 1. With vma+addr: we use the VMA's memory policy
1450 * 2. With !vma, but nid=NUMA_NO_NODE: We try to allocate a huge
1451 * page from any node, and let the buddy allocator itself figure
1452 * it out.
1453 * 3. With !vma, but nid!=NUMA_NO_NODE. We allocate a huge page
1454 * strictly from 'nid'
1455 */
1456 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1457 struct vm_area_struct *vma, unsigned long addr, int nid)
1458 {
1459 int order = huge_page_order(h);
1460 gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1461 unsigned int cpuset_mems_cookie;
1462
1463 /*
1464 * We need a VMA to get a memory policy. If we do not
1465 * have one, we use the 'nid' argument.
1466 *
1467 * The mempolicy stuff below has some non-inlined bits
1468 * and calls ->vm_ops. That makes it hard to optimize at
1469 * compile-time, even when NUMA is off and it does
1470 * nothing. This helps the compiler optimize it out.
1471 */
1472 if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1473 /*
1474 * If a specific node is requested, make sure to
1475 * get memory from there, but only when a node
1476 * is explicitly specified.
1477 */
1478 if (nid != NUMA_NO_NODE)
1479 gfp |= __GFP_THISNODE;
1480 /*
1481 * Make sure to call something that can handle
1482 * nid=NUMA_NO_NODE
1483 */
1484 return alloc_pages_node(nid, gfp, order);
1485 }
1486
1487 /*
1488 * OK, so we have a VMA. Fetch the mempolicy and try to
1489 * allocate a huge page with it. We will only reach this
1490 * when CONFIG_NUMA=y.
1491 */
1492 do {
1493 struct page *page;
1494 struct mempolicy *mpol;
1495 struct zonelist *zl;
1496 nodemask_t *nodemask;
1497
1498 cpuset_mems_cookie = read_mems_allowed_begin();
1499 zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1500 mpol_cond_put(mpol);
1501 page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1502 if (page)
1503 return page;
1504 } while (read_mems_allowed_retry(cpuset_mems_cookie));
1505
1506 return NULL;
1507 }
1508
1509 /*
1510 * There are two ways to allocate a huge page:
1511 * 1. When you have a VMA and an address (like a fault)
1512 * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
1513 *
1514 * 'vma' and 'addr' are only for (1). 'nid' is always NUMA_NO_NODE in
1515 * this case which signifies that the allocation should be done with
1516 * respect for the VMA's memory policy.
1517 *
1518 * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1519 * implies that memory policies will not be taken in to account.
1520 */
1521 static struct page *__alloc_buddy_huge_page(struct hstate *h,
1522 struct vm_area_struct *vma, unsigned long addr, int nid)
1523 {
1524 struct page *page;
1525 unsigned int r_nid;
1526
1527 if (hstate_is_gigantic(h))
1528 return NULL;
1529
1530 /*
1531 * Make sure that anyone specifying 'nid' is not also specifying a VMA.
1532 * This makes sure the caller is picking _one_ of the modes with which
1533 * we can call this function, not both.
1534 */
1535 if (vma || (addr != -1)) {
1536 VM_WARN_ON_ONCE(addr == -1);
1537 VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1538 }
1539 /*
1540 * Assume we will successfully allocate the surplus page to
1541 * prevent racing processes from causing the surplus to exceed
1542 * overcommit
1543 *
1544 * This however introduces a different race, where a process B
1545 * tries to grow the static hugepage pool while alloc_pages() is
1546 * called by process A. B will only examine the per-node
1547 * counters in determining if surplus huge pages can be
1548 * converted to normal huge pages in adjust_pool_surplus(). A
1549 * won't be able to increment the per-node counter, until the
1550 * lock is dropped by B, but B doesn't drop hugetlb_lock until
1551 * no more huge pages can be converted from surplus to normal
1552 * state (and doesn't try to convert again). Thus, we have a
1553 * case where a surplus huge page exists, the pool is grown, and
1554 * the surplus huge page still exists after, even though it
1555 * should just have been converted to a normal huge page. This
1556 * does not leak memory, though, as the hugepage will be freed
1557 * once it is out of use. It also does not allow the counters to
1558 * go out of whack in adjust_pool_surplus() as we don't modify
1559 * the node values until we've gotten the hugepage and only the
1560 * per-node value is checked there.
1561 */
1562 spin_lock(&hugetlb_lock);
1563 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1564 spin_unlock(&hugetlb_lock);
1565 return NULL;
1566 } else {
1567 h->nr_huge_pages++;
1568 h->surplus_huge_pages++;
1569 }
1570 spin_unlock(&hugetlb_lock);
1571
1572 page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1573
1574 spin_lock(&hugetlb_lock);
1575 if (page) {
1576 INIT_LIST_HEAD(&page->lru);
1577 r_nid = page_to_nid(page);
1578 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1579 set_hugetlb_cgroup(page, NULL);
1580 /*
1581 * We incremented the global counters already
1582 */
1583 h->nr_huge_pages_node[r_nid]++;
1584 h->surplus_huge_pages_node[r_nid]++;
1585 __count_vm_event(HTLB_BUDDY_PGALLOC);
1586 } else {
1587 h->nr_huge_pages--;
1588 h->surplus_huge_pages--;
1589 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1590 }
1591 spin_unlock(&hugetlb_lock);
1592
1593 return page;
1594 }
1595
1596 /*
1597 * Allocate a huge page from 'nid'. Note, 'nid' may be
1598 * NUMA_NO_NODE, which means that it may be allocated
1599 * anywhere.
1600 */
1601 static
1602 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1603 {
1604 unsigned long addr = -1;
1605
1606 return __alloc_buddy_huge_page(h, NULL, addr, nid);
1607 }
1608
1609 /*
1610 * Use the VMA's mpolicy to allocate a huge page from the buddy.
1611 */
1612 static
1613 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1614 struct vm_area_struct *vma, unsigned long addr)
1615 {
1616 return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1617 }
1618
1619 /*
1620 * This allocation function is useful in the context where vma is irrelevant.
1621 * E.g. soft-offlining uses this function because it only cares about the
1622 * physical address of the error page.
1623 */
1624 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1625 {
1626 struct page *page = NULL;
1627
1628 spin_lock(&hugetlb_lock);
1629 if (h->free_huge_pages - h->resv_huge_pages > 0)
1630 page = dequeue_huge_page_node(h, nid);
1631 spin_unlock(&hugetlb_lock);
1632
1633 if (!page)
1634 page = __alloc_buddy_huge_page_no_mpol(h, nid);
1635
1636 return page;
1637 }
1638
1639 /*
1640 * Increase the hugetlb pool such that it can accommodate a reservation
1641 * of size 'delta'.
1642 */
1643 static int gather_surplus_pages(struct hstate *h, int delta)
1644 {
1645 struct list_head surplus_list;
1646 struct page *page, *tmp;
1647 int ret, i;
1648 int needed, allocated;
1649 bool alloc_ok = true;
1650
1651 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1652 if (needed <= 0) {
1653 h->resv_huge_pages += delta;
1654 return 0;
1655 }
1656
1657 allocated = 0;
1658 INIT_LIST_HEAD(&surplus_list);
1659
1660 ret = -ENOMEM;
1661 retry:
1662 spin_unlock(&hugetlb_lock);
1663 for (i = 0; i < needed; i++) {
1664 page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1665 if (!page) {
1666 alloc_ok = false;
1667 break;
1668 }
1669 list_add(&page->lru, &surplus_list);
1670 }
1671 allocated += i;
1672
1673 /*
1674 * After retaking hugetlb_lock, we need to recalculate 'needed'
1675 * because either resv_huge_pages or free_huge_pages may have changed.
1676 */
1677 spin_lock(&hugetlb_lock);
1678 needed = (h->resv_huge_pages + delta) -
1679 (h->free_huge_pages + allocated);
1680 if (needed > 0) {
1681 if (alloc_ok)
1682 goto retry;
1683 /*
1684 * We were not able to allocate enough pages to
1685 * satisfy the entire reservation so we free what
1686 * we've allocated so far.
1687 */
1688 goto free;
1689 }
1690 /*
1691 * The surplus_list now contains _at_least_ the number of extra pages
1692 * needed to accommodate the reservation. Add the appropriate number
1693 * of pages to the hugetlb pool and free the extras back to the buddy
1694 * allocator. Commit the entire reservation here to prevent another
1695 * process from stealing the pages as they are added to the pool but
1696 * before they are reserved.
1697 */
1698 needed += allocated;
1699 h->resv_huge_pages += delta;
1700 ret = 0;
1701
1702 /* Free the needed pages to the hugetlb pool */
1703 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1704 if ((--needed) < 0)
1705 break;
1706 /*
1707 * This page is now managed by the hugetlb allocator and has
1708 * no users -- drop the buddy allocator's reference.
1709 */
1710 put_page_testzero(page);
1711 VM_BUG_ON_PAGE(page_count(page), page);
1712 enqueue_huge_page(h, page);
1713 }
1714 free:
1715 spin_unlock(&hugetlb_lock);
1716
1717 /* Free unnecessary surplus pages to the buddy allocator */
1718 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1719 put_page(page);
1720 spin_lock(&hugetlb_lock);
1721
1722 return ret;
1723 }
1724
1725 /*
1726 * When releasing a hugetlb pool reservation, any surplus pages that were
1727 * allocated to satisfy the reservation must be explicitly freed if they were
1728 * never used.
1729 * Called with hugetlb_lock held.
1730 */
1731 static void return_unused_surplus_pages(struct hstate *h,
1732 unsigned long unused_resv_pages)
1733 {
1734 unsigned long nr_pages;
1735
1736 /* Uncommit the reservation */
1737 h->resv_huge_pages -= unused_resv_pages;
1738
1739 /* Cannot return gigantic pages currently */
1740 if (hstate_is_gigantic(h))
1741 return;
1742
1743 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1744
1745 /*
1746 * We want to release as many surplus pages as possible, spread
1747 * evenly across all nodes with memory. Iterate across these nodes
1748 * until we can no longer free unreserved surplus pages. This occurs
1749 * when the nodes with surplus pages have no free pages.
1750 * free_pool_huge_page() will balance the freed pages across the
1751 * on-line nodes with memory and will handle the hstate accounting.
1752 */
1753 while (nr_pages--) {
1754 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1755 break;
1756 cond_resched_lock(&hugetlb_lock);
1757 }
1758 }
1759
1760
1761 /*
1762 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1763 * are used by the huge page allocation routines to manage reservations.
1764 *
1765 * vma_needs_reservation is called to determine if the huge page at addr
1766 * within the vma has an associated reservation. If a reservation is
1767 * needed, the value 1 is returned. The caller is then responsible for
1768 * managing the global reservation and subpool usage counts. After
1769 * the huge page has been allocated, vma_commit_reservation is called
1770 * to add the page to the reservation map. If the page allocation fails,
1771 * the reservation must be ended instead of committed. vma_end_reservation
1772 * is called in such cases.
1773 *
1774 * In the normal case, vma_commit_reservation returns the same value
1775 * as the preceding vma_needs_reservation call. The only time this
1776 * is not the case is if a reserve map was changed between calls. It
1777 * is the responsibility of the caller to notice the difference and
1778 * take appropriate action.
1779 */
1780 enum vma_resv_mode {
1781 VMA_NEEDS_RESV,
1782 VMA_COMMIT_RESV,
1783 VMA_END_RESV,
1784 };
1785 static long __vma_reservation_common(struct hstate *h,
1786 struct vm_area_struct *vma, unsigned long addr,
1787 enum vma_resv_mode mode)
1788 {
1789 struct resv_map *resv;
1790 pgoff_t idx;
1791 long ret;
1792
1793 resv = vma_resv_map(vma);
1794 if (!resv)
1795 return 1;
1796
1797 idx = vma_hugecache_offset(h, vma, addr);
1798 switch (mode) {
1799 case VMA_NEEDS_RESV:
1800 ret = region_chg(resv, idx, idx + 1);
1801 break;
1802 case VMA_COMMIT_RESV:
1803 ret = region_add(resv, idx, idx + 1);
1804 break;
1805 case VMA_END_RESV:
1806 region_abort(resv, idx, idx + 1);
1807 ret = 0;
1808 break;
1809 default:
1810 BUG();
1811 }
1812
1813 if (vma->vm_flags & VM_MAYSHARE)
1814 return ret;
1815 else
1816 return ret < 0 ? ret : 0;
1817 }
1818
1819 static long vma_needs_reservation(struct hstate *h,
1820 struct vm_area_struct *vma, unsigned long addr)
1821 {
1822 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1823 }
1824
1825 static long vma_commit_reservation(struct hstate *h,
1826 struct vm_area_struct *vma, unsigned long addr)
1827 {
1828 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1829 }
1830
1831 static void vma_end_reservation(struct hstate *h,
1832 struct vm_area_struct *vma, unsigned long addr)
1833 {
1834 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1835 }
1836
1837 struct page *alloc_huge_page(struct vm_area_struct *vma,
1838 unsigned long addr, int avoid_reserve)
1839 {
1840 struct hugepage_subpool *spool = subpool_vma(vma);
1841 struct hstate *h = hstate_vma(vma);
1842 struct page *page;
1843 long map_chg, map_commit;
1844 long gbl_chg;
1845 int ret, idx;
1846 struct hugetlb_cgroup *h_cg;
1847
1848 idx = hstate_index(h);
1849 /*
1850 * Examine the region/reserve map to determine if the process
1851 * has a reservation for the page to be allocated. A return
1852 * code of zero indicates a reservation exists (no change).
1853 */
1854 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1855 if (map_chg < 0)
1856 return ERR_PTR(-ENOMEM);
1857
1858 /*
1859 * Processes that did not create the mapping will have no
1860 * reserves as indicated by the region/reserve map. Check
1861 * that the allocation will not exceed the subpool limit.
1862 * Allocations for MAP_NORESERVE mappings also need to be
1863 * checked against any subpool limit.
1864 */
1865 if (map_chg || avoid_reserve) {
1866 gbl_chg = hugepage_subpool_get_pages(spool, 1);
1867 if (gbl_chg < 0) {
1868 vma_end_reservation(h, vma, addr);
1869 return ERR_PTR(-ENOSPC);
1870 }
1871
1872 /*
1873 * Even though there was no reservation in the region/reserve
1874 * map, there could be reservations associated with the
1875 * subpool that can be used. This would be indicated if the
1876 * return value of hugepage_subpool_get_pages() is zero.
1877 * However, if avoid_reserve is specified we still avoid even
1878 * the subpool reservations.
1879 */
1880 if (avoid_reserve)
1881 gbl_chg = 1;
1882 }
1883
1884 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1885 if (ret)
1886 goto out_subpool_put;
1887
1888 spin_lock(&hugetlb_lock);
1889 /*
1890 * gbl_chg is passed to indicate whether or not a page must be taken
1891 * from the global free pool (global change). gbl_chg == 0 indicates
1892 * a reservation exists for the allocation.
1893 */
1894 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
1895 if (!page) {
1896 spin_unlock(&hugetlb_lock);
1897 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
1898 if (!page)
1899 goto out_uncharge_cgroup;
1900 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
1901 SetPagePrivate(page);
1902 h->resv_huge_pages--;
1903 }
1904 spin_lock(&hugetlb_lock);
1905 list_move(&page->lru, &h->hugepage_activelist);
1906 /* Fall through */
1907 }
1908 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1909 spin_unlock(&hugetlb_lock);
1910
1911 set_page_private(page, (unsigned long)spool);
1912
1913 map_commit = vma_commit_reservation(h, vma, addr);
1914 if (unlikely(map_chg > map_commit)) {
1915 /*
1916 * The page was added to the reservation map between
1917 * vma_needs_reservation and vma_commit_reservation.
1918 * This indicates a race with hugetlb_reserve_pages.
1919 * Adjust for the subpool count incremented above AND
1920 * in hugetlb_reserve_pages for the same page. Also,
1921 * the reservation count added in hugetlb_reserve_pages
1922 * no longer applies.
1923 */
1924 long rsv_adjust;
1925
1926 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
1927 hugetlb_acct_memory(h, -rsv_adjust);
1928 }
1929 return page;
1930
1931 out_uncharge_cgroup:
1932 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1933 out_subpool_put:
1934 if (map_chg || avoid_reserve)
1935 hugepage_subpool_put_pages(spool, 1);
1936 vma_end_reservation(h, vma, addr);
1937 return ERR_PTR(-ENOSPC);
1938 }
1939
1940 /*
1941 * alloc_huge_page()'s wrapper which simply returns the page if allocation
1942 * succeeds, otherwise NULL. This function is called from new_vma_page(),
1943 * where no ERR_PTR() value is expected to be returned.
1944 */
1945 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1946 unsigned long addr, int avoid_reserve)
1947 {
1948 struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1949 if (IS_ERR(page))
1950 page = NULL;
1951 return page;
1952 }
1953
1954 int __weak alloc_bootmem_huge_page(struct hstate *h)
1955 {
1956 struct huge_bootmem_page *m;
1957 int nr_nodes, node;
1958
1959 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1960 void *addr;
1961
1962 addr = memblock_virt_alloc_try_nid_nopanic(
1963 huge_page_size(h), huge_page_size(h),
1964 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1965 if (addr) {
1966 /*
1967 * Use the beginning of the huge page to store the
1968 * huge_bootmem_page struct (until gather_bootmem
1969 * puts them into the mem_map).
1970 */
1971 m = addr;
1972 goto found;
1973 }
1974 }
1975 return 0;
1976
1977 found:
1978 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1979 /* Put them into a private list first because mem_map is not up yet */
1980 list_add(&m->list, &huge_boot_pages);
1981 m->hstate = h;
1982 return 1;
1983 }
1984
1985 static void __init prep_compound_huge_page(struct page *page,
1986 unsigned int order)
1987 {
1988 if (unlikely(order > (MAX_ORDER - 1)))
1989 prep_compound_gigantic_page(page, order);
1990 else
1991 prep_compound_page(page, order);
1992 }
1993
1994 /* Put bootmem huge pages into the standard lists after mem_map is up */
1995 static void __init gather_bootmem_prealloc(void)
1996 {
1997 struct huge_bootmem_page *m;
1998
1999 list_for_each_entry(m, &huge_boot_pages, list) {
2000 struct hstate *h = m->hstate;
2001 struct page *page;
2002
2003 #ifdef CONFIG_HIGHMEM
2004 page = pfn_to_page(m->phys >> PAGE_SHIFT);
2005 memblock_free_late(__pa(m),
2006 sizeof(struct huge_bootmem_page));
2007 #else
2008 page = virt_to_page(m);
2009 #endif
2010 WARN_ON(page_count(page) != 1);
2011 prep_compound_huge_page(page, h->order);
2012 WARN_ON(PageReserved(page));
2013 prep_new_huge_page(h, page, page_to_nid(page));
2014 /*
2015 * If we had gigantic hugepages allocated at boot time, we need
2016 * to restore the 'stolen' pages to totalram_pages in order to
2017 * fix confusing memory reports from free(1) and other
2018 * side-effects, like CommitLimit going negative.
2019 */
2020 if (hstate_is_gigantic(h))
2021 adjust_managed_page_count(page, 1 << h->order);
2022 }
2023 }
2024
2025 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2026 {
2027 unsigned long i;
2028
2029 for (i = 0; i < h->max_huge_pages; ++i) {
2030 if (hstate_is_gigantic(h)) {
2031 if (!alloc_bootmem_huge_page(h))
2032 break;
2033 } else if (!alloc_fresh_huge_page(h,
2034 &node_states[N_MEMORY]))
2035 break;
2036 }
2037 h->max_huge_pages = i;
2038 }
2039
2040 static void __init hugetlb_init_hstates(void)
2041 {
2042 struct hstate *h;
2043
2044 for_each_hstate(h) {
2045 if (minimum_order > huge_page_order(h))
2046 minimum_order = huge_page_order(h);
2047
2048 /* oversize hugepages were init'ed in early boot */
2049 if (!hstate_is_gigantic(h))
2050 hugetlb_hstate_alloc_pages(h);
2051 }
2052 VM_BUG_ON(minimum_order == UINT_MAX);
2053 }
2054
2055 static char * __init memfmt(char *buf, unsigned long n)
2056 {
2057 if (n >= (1UL << 30))
2058 sprintf(buf, "%lu GB", n >> 30);
2059 else if (n >= (1UL << 20))
2060 sprintf(buf, "%lu MB", n >> 20);
2061 else
2062 sprintf(buf, "%lu KB", n >> 10);
2063 return buf;
2064 }
2065
2066 static void __init report_hugepages(void)
2067 {
2068 struct hstate *h;
2069
2070 for_each_hstate(h) {
2071 char buf[32];
2072 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2073 memfmt(buf, huge_page_size(h)),
2074 h->free_huge_pages);
2075 }
2076 }
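/*
 * For example, with a 2 MB hstate and 512 pages pre-allocated, the line
 * printed above would read (illustrative output only):
 *
 *	HugeTLB registered 2 MB page size, pre-allocated 512 pages
 */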
2077
2078 #ifdef CONFIG_HIGHMEM
2079 static void try_to_free_low(struct hstate *h, unsigned long count,
2080 nodemask_t *nodes_allowed)
2081 {
2082 int i;
2083
2084 if (hstate_is_gigantic(h))
2085 return;
2086
2087 for_each_node_mask(i, *nodes_allowed) {
2088 struct page *page, *next;
2089 struct list_head *freel = &h->hugepage_freelists[i];
2090 list_for_each_entry_safe(page, next, freel, lru) {
2091 if (count >= h->nr_huge_pages)
2092 return;
2093 if (PageHighMem(page))
2094 continue;
2095 list_del(&page->lru);
2096 update_and_free_page(h, page);
2097 h->free_huge_pages--;
2098 h->free_huge_pages_node[page_to_nid(page)]--;
2099 }
2100 }
2101 }
2102 #else
2103 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2104 nodemask_t *nodes_allowed)
2105 {
2106 }
2107 #endif
2108
2109 /*
2110 * Increment or decrement surplus_huge_pages. Keep node-specific counters
2111 * balanced by operating on them in a round-robin fashion.
2112 * Returns 1 if an adjustment was made.
2113 */
2114 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2115 int delta)
2116 {
2117 int nr_nodes, node;
2118
2119 VM_BUG_ON(delta != -1 && delta != 1);
2120
2121 if (delta < 0) {
2122 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2123 if (h->surplus_huge_pages_node[node])
2124 goto found;
2125 }
2126 } else {
2127 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2128 if (h->surplus_huge_pages_node[node] <
2129 h->nr_huge_pages_node[node])
2130 goto found;
2131 }
2132 }
2133 return 0;
2134
2135 found:
2136 h->surplus_huge_pages += delta;
2137 h->surplus_huge_pages_node[node] += delta;
2138 return 1;
2139 }
2140
2141 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2142 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2143 nodemask_t *nodes_allowed)
2144 {
2145 unsigned long min_count, ret;
2146
2147 if (hstate_is_gigantic(h) && !gigantic_page_supported())
2148 return h->max_huge_pages;
2149
2150 /*
2151 * Increase the pool size
2152 * First take pages out of surplus state. Then make up the
2153 * remaining difference by allocating fresh huge pages.
2154 *
2155 * We might race with __alloc_buddy_huge_page() here and be unable
2156 * to convert a surplus huge page to a normal huge page. That is
2157 * not critical, though, it just means the overall size of the
2158 * pool might be one hugepage larger than it needs to be, but
2159 * within all the constraints specified by the sysctls.
2160 */
2161 spin_lock(&hugetlb_lock);
2162 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2163 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2164 break;
2165 }
2166
2167 while (count > persistent_huge_pages(h)) {
2168 /*
2169 * If this allocation races such that we no longer need the
2170 * page, free_huge_page will handle it by freeing the page
2171 * and reducing the surplus.
2172 */
2173 spin_unlock(&hugetlb_lock);
2174 if (hstate_is_gigantic(h))
2175 ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2176 else
2177 ret = alloc_fresh_huge_page(h, nodes_allowed);
2178 spin_lock(&hugetlb_lock);
2179 if (!ret)
2180 goto out;
2181
2182 /* Bail for signals. Probably ctrl-c from user */
2183 if (signal_pending(current))
2184 goto out;
2185 }
2186
2187 /*
2188 * Decrease the pool size
2189 * First return free pages to the buddy allocator (being careful
2190 * to keep enough around to satisfy reservations). Then place
2191 * pages into surplus state as needed so the pool will shrink
2192 * to the desired size as pages become free.
2193 *
2194 * By placing pages into the surplus state independent of the
2195 * overcommit value, we are allowing the surplus pool size to
2196 * exceed overcommit. There are few sane options here. Since
2197 * __alloc_buddy_huge_page() is checking the global counter,
2198 * though, we'll note that we're not allowed to exceed surplus
2199 * and won't grow the pool anywhere else. Not until one of the
2200 * sysctls is changed, or the surplus pages go out of use.
2201 */
2202 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2203 min_count = max(count, min_count);
2204 try_to_free_low(h, min_count, nodes_allowed);
2205 while (min_count < persistent_huge_pages(h)) {
2206 if (!free_pool_huge_page(h, nodes_allowed, 0))
2207 break;
2208 cond_resched_lock(&hugetlb_lock);
2209 }
2210 while (count < persistent_huge_pages(h)) {
2211 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2212 break;
2213 }
2214 out:
2215 ret = persistent_huge_pages(h);
2216 spin_unlock(&hugetlb_lock);
2217 return ret;
2218 }
2219
2220 #define HSTATE_ATTR_RO(_name) \
2221 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2222
2223 #define HSTATE_ATTR(_name) \
2224 static struct kobj_attribute _name##_attr = \
2225 __ATTR(_name, 0644, _name##_show, _name##_store)
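/*
 * For reference, HSTATE_ATTR(nr_hugepages) used below expands to roughly:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show, nr_hugepages_store);
 *
 * i.e. a read-write sysfs attribute wired to the matching _show/_store pair.
 */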
2226
2227 static struct kobject *hugepages_kobj;
2228 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2229
2230 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2231
2232 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2233 {
2234 int i;
2235
2236 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2237 if (hstate_kobjs[i] == kobj) {
2238 if (nidp)
2239 *nidp = NUMA_NO_NODE;
2240 return &hstates[i];
2241 }
2242
2243 return kobj_to_node_hstate(kobj, nidp);
2244 }
2245
2246 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2247 struct kobj_attribute *attr, char *buf)
2248 {
2249 struct hstate *h;
2250 unsigned long nr_huge_pages;
2251 int nid;
2252
2253 h = kobj_to_hstate(kobj, &nid);
2254 if (nid == NUMA_NO_NODE)
2255 nr_huge_pages = h->nr_huge_pages;
2256 else
2257 nr_huge_pages = h->nr_huge_pages_node[nid];
2258
2259 return sprintf(buf, "%lu\n", nr_huge_pages);
2260 }
2261
2262 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2263 struct hstate *h, int nid,
2264 unsigned long count, size_t len)
2265 {
2266 int err;
2267 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2268
2269 if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2270 err = -EINVAL;
2271 goto out;
2272 }
2273
2274 if (nid == NUMA_NO_NODE) {
2275 /*
2276 * global hstate attribute
2277 */
2278 if (!(obey_mempolicy &&
2279 init_nodemask_of_mempolicy(nodes_allowed))) {
2280 NODEMASK_FREE(nodes_allowed);
2281 nodes_allowed = &node_states[N_MEMORY];
2282 }
2283 } else if (nodes_allowed) {
2284 /*
2285 * per node hstate attribute: adjust count to global,
2286 * but restrict alloc/free to the specified node.
2287 */
2288 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2289 init_nodemask_of_node(nodes_allowed, nid);
2290 } else
2291 nodes_allowed = &node_states[N_MEMORY];
2292
2293 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2294
2295 if (nodes_allowed != &node_states[N_MEMORY])
2296 NODEMASK_FREE(nodes_allowed);
2297
2298 return len;
2299 out:
2300 NODEMASK_FREE(nodes_allowed);
2301 return err;
2302 }
2303
2304 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2305 struct kobject *kobj, const char *buf,
2306 size_t len)
2307 {
2308 struct hstate *h;
2309 unsigned long count;
2310 int nid;
2311 int err;
2312
2313 err = kstrtoul(buf, 10, &count);
2314 if (err)
2315 return err;
2316
2317 h = kobj_to_hstate(kobj, &nid);
2318 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2319 }
2320
2321 static ssize_t nr_hugepages_show(struct kobject *kobj,
2322 struct kobj_attribute *attr, char *buf)
2323 {
2324 return nr_hugepages_show_common(kobj, attr, buf);
2325 }
2326
2327 static ssize_t nr_hugepages_store(struct kobject *kobj,
2328 struct kobj_attribute *attr, const char *buf, size_t len)
2329 {
2330 return nr_hugepages_store_common(false, kobj, buf, len);
2331 }
2332 HSTATE_ATTR(nr_hugepages);
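/*
 * From userspace the attribute defined above is visible per hstate; an
 * illustrative session for the 2 MB hstate (path and values are examples):
 *
 *	# echo 512 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	# cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	512
 */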
2333
2334 #ifdef CONFIG_NUMA
2335
2336 /*
2337 * hstate attribute for optionally mempolicy-based constraint on persistent
2338 * huge page alloc/free.
2339 */
2340 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2341 struct kobj_attribute *attr, char *buf)
2342 {
2343 return nr_hugepages_show_common(kobj, attr, buf);
2344 }
2345
2346 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2347 struct kobj_attribute *attr, const char *buf, size_t len)
2348 {
2349 return nr_hugepages_store_common(true, kobj, buf, len);
2350 }
2351 HSTATE_ATTR(nr_hugepages_mempolicy);
2352 #endif
2353
2354
2355 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2356 struct kobj_attribute *attr, char *buf)
2357 {
2358 struct hstate *h = kobj_to_hstate(kobj, NULL);
2359 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2360 }
2361
2362 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2363 struct kobj_attribute *attr, const char *buf, size_t count)
2364 {
2365 int err;
2366 unsigned long input;
2367 struct hstate *h = kobj_to_hstate(kobj, NULL);
2368
2369 if (hstate_is_gigantic(h))
2370 return -EINVAL;
2371
2372 err = kstrtoul(buf, 10, &input);
2373 if (err)
2374 return err;
2375
2376 spin_lock(&hugetlb_lock);
2377 h->nr_overcommit_huge_pages = input;
2378 spin_unlock(&hugetlb_lock);
2379
2380 return count;
2381 }
2382 HSTATE_ATTR(nr_overcommit_hugepages);
2383
2384 static ssize_t free_hugepages_show(struct kobject *kobj,
2385 struct kobj_attribute *attr, char *buf)
2386 {
2387 struct hstate *h;
2388 unsigned long free_huge_pages;
2389 int nid;
2390
2391 h = kobj_to_hstate(kobj, &nid);
2392 if (nid == NUMA_NO_NODE)
2393 free_huge_pages = h->free_huge_pages;
2394 else
2395 free_huge_pages = h->free_huge_pages_node[nid];
2396
2397 return sprintf(buf, "%lu\n", free_huge_pages);
2398 }
2399 HSTATE_ATTR_RO(free_hugepages);
2400
2401 static ssize_t resv_hugepages_show(struct kobject *kobj,
2402 struct kobj_attribute *attr, char *buf)
2403 {
2404 struct hstate *h = kobj_to_hstate(kobj, NULL);
2405 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2406 }
2407 HSTATE_ATTR_RO(resv_hugepages);
2408
2409 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2410 struct kobj_attribute *attr, char *buf)
2411 {
2412 struct hstate *h;
2413 unsigned long surplus_huge_pages;
2414 int nid;
2415
2416 h = kobj_to_hstate(kobj, &nid);
2417 if (nid == NUMA_NO_NODE)
2418 surplus_huge_pages = h->surplus_huge_pages;
2419 else
2420 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2421
2422 return sprintf(buf, "%lu\n", surplus_huge_pages);
2423 }
2424 HSTATE_ATTR_RO(surplus_hugepages);
2425
2426 static struct attribute *hstate_attrs[] = {
2427 &nr_hugepages_attr.attr,
2428 &nr_overcommit_hugepages_attr.attr,
2429 &free_hugepages_attr.attr,
2430 &resv_hugepages_attr.attr,
2431 &surplus_hugepages_attr.attr,
2432 #ifdef CONFIG_NUMA
2433 &nr_hugepages_mempolicy_attr.attr,
2434 #endif
2435 NULL,
2436 };
2437
2438 static struct attribute_group hstate_attr_group = {
2439 .attrs = hstate_attrs,
2440 };
2441
2442 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2443 struct kobject **hstate_kobjs,
2444 struct attribute_group *hstate_attr_group)
2445 {
2446 int retval;
2447 int hi = hstate_index(h);
2448
2449 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2450 if (!hstate_kobjs[hi])
2451 return -ENOMEM;
2452
2453 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2454 if (retval)
2455 kobject_put(hstate_kobjs[hi]);
2456
2457 return retval;
2458 }
2459
2460 static void __init hugetlb_sysfs_init(void)
2461 {
2462 struct hstate *h;
2463 int err;
2464
2465 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2466 if (!hugepages_kobj)
2467 return;
2468
2469 for_each_hstate(h) {
2470 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2471 hstate_kobjs, &hstate_attr_group);
2472 if (err)
2473 pr_err("Hugetlb: Unable to add hstate %s", h->name);
2474 }
2475 }
2476
2477 #ifdef CONFIG_NUMA
2478
2479 /*
2480 * node_hstate/s - associate per node hstate attributes, via their kobjects,
2481 * with node devices in node_devices[] using a parallel array. The array
2482 * index of a node device or _hstate == node id.
2483 * This is here to avoid any static dependency of the node device driver, in
2484 * the base kernel, on the hugetlb module.
2485 */
2486 struct node_hstate {
2487 struct kobject *hugepages_kobj;
2488 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2489 };
2490 static struct node_hstate node_hstates[MAX_NUMNODES];
2491
2492 /*
2493 * A subset of global hstate attributes for node devices
2494 */
2495 static struct attribute *per_node_hstate_attrs[] = {
2496 &nr_hugepages_attr.attr,
2497 &free_hugepages_attr.attr,
2498 &surplus_hugepages_attr.attr,
2499 NULL,
2500 };
2501
2502 static struct attribute_group per_node_hstate_attr_group = {
2503 .attrs = per_node_hstate_attrs,
2504 };
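/*
 * These per-node attributes show up under each memory node's device
 * directory, e.g. for node 0 and the 2 MB hstate (illustrative path):
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 */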
2505
2506 /*
2507 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2508 * Returns node id via non-NULL nidp.
2509 */
2510 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2511 {
2512 int nid;
2513
2514 for (nid = 0; nid < nr_node_ids; nid++) {
2515 struct node_hstate *nhs = &node_hstates[nid];
2516 int i;
2517 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2518 if (nhs->hstate_kobjs[i] == kobj) {
2519 if (nidp)
2520 *nidp = nid;
2521 return &hstates[i];
2522 }
2523 }
2524
2525 BUG();
2526 return NULL;
2527 }
2528
2529 /*
2530 * Unregister hstate attributes from a single node device.
2531 * No-op if no hstate attributes attached.
2532 */
2533 static void hugetlb_unregister_node(struct node *node)
2534 {
2535 struct hstate *h;
2536 struct node_hstate *nhs = &node_hstates[node->dev.id];
2537
2538 if (!nhs->hugepages_kobj)
2539 return; /* no hstate attributes */
2540
2541 for_each_hstate(h) {
2542 int idx = hstate_index(h);
2543 if (nhs->hstate_kobjs[idx]) {
2544 kobject_put(nhs->hstate_kobjs[idx]);
2545 nhs->hstate_kobjs[idx] = NULL;
2546 }
2547 }
2548
2549 kobject_put(nhs->hugepages_kobj);
2550 nhs->hugepages_kobj = NULL;
2551 }
2552
2553
2554 /*
2555 * Register hstate attributes for a single node device.
2556 * No-op if attributes already registered.
2557 */
2558 static void hugetlb_register_node(struct node *node)
2559 {
2560 struct hstate *h;
2561 struct node_hstate *nhs = &node_hstates[node->dev.id];
2562 int err;
2563
2564 if (nhs->hugepages_kobj)
2565 return; /* already allocated */
2566
2567 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2568 &node->dev.kobj);
2569 if (!nhs->hugepages_kobj)
2570 return;
2571
2572 for_each_hstate(h) {
2573 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2574 nhs->hstate_kobjs,
2575 &per_node_hstate_attr_group);
2576 if (err) {
2577 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2578 h->name, node->dev.id);
2579 hugetlb_unregister_node(node);
2580 break;
2581 }
2582 }
2583 }
2584
2585 /*
2586 * hugetlb init time: register hstate attributes for all registered node
2587 * devices of nodes that have memory. All on-line nodes should have
2588 * registered their associated device by this time.
2589 */
2590 static void __init hugetlb_register_all_nodes(void)
2591 {
2592 int nid;
2593
2594 for_each_node_state(nid, N_MEMORY) {
2595 struct node *node = node_devices[nid];
2596 if (node->dev.id == nid)
2597 hugetlb_register_node(node);
2598 }
2599
2600 /*
2601 * Let the node device driver know we're here so it can
2602 * [un]register hstate attributes on node hotplug.
2603 */
2604 register_hugetlbfs_with_node(hugetlb_register_node,
2605 hugetlb_unregister_node);
2606 }
2607 #else /* !CONFIG_NUMA */
2608
2609 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2610 {
2611 BUG();
2612 if (nidp)
2613 *nidp = -1;
2614 return NULL;
2615 }
2616
2617 static void hugetlb_register_all_nodes(void) { }
2618
2619 #endif
2620
2621 static int __init hugetlb_init(void)
2622 {
2623 int i;
2624
2625 if (!hugepages_supported())
2626 return 0;
2627
2628 if (!size_to_hstate(default_hstate_size)) {
2629 default_hstate_size = HPAGE_SIZE;
2630 if (!size_to_hstate(default_hstate_size))
2631 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2632 }
2633 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2634 if (default_hstate_max_huge_pages) {
2635 if (!default_hstate.max_huge_pages)
2636 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2637 }
2638
2639 hugetlb_init_hstates();
2640 gather_bootmem_prealloc();
2641 report_hugepages();
2642
2643 hugetlb_sysfs_init();
2644 hugetlb_register_all_nodes();
2645 hugetlb_cgroup_file_init();
2646
2647 #ifdef CONFIG_SMP
2648 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2649 #else
2650 num_fault_mutexes = 1;
2651 #endif
2652 hugetlb_fault_mutex_table =
2653 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2654 BUG_ON(!hugetlb_fault_mutex_table);
2655
2656 for (i = 0; i < num_fault_mutexes; i++)
2657 mutex_init(&hugetlb_fault_mutex_table[i]);
2658 return 0;
2659 }
2660 subsys_initcall(hugetlb_init);
2661
2662 /* Should be called on processing a hugepagesz=... option */
2663 void __init hugetlb_bad_size(void)
2664 {
2665 parsed_valid_hugepagesz = false;
2666 }
2667
2668 void __init hugetlb_add_hstate(unsigned int order)
2669 {
2670 struct hstate *h;
2671 unsigned long i;
2672
2673 if (size_to_hstate(PAGE_SIZE << order)) {
2674 pr_warn("hugepagesz= specified twice, ignoring\n");
2675 return;
2676 }
2677 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2678 BUG_ON(order == 0);
2679 h = &hstates[hugetlb_max_hstate++];
2680 h->order = order;
2681 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2682 h->nr_huge_pages = 0;
2683 h->free_huge_pages = 0;
2684 for (i = 0; i < MAX_NUMNODES; ++i)
2685 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2686 INIT_LIST_HEAD(&h->hugepage_activelist);
2687 h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2688 h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2689 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2690 huge_page_size(h)/1024);
2691
2692 parsed_hstate = h;
2693 }
2694
2695 static int __init hugetlb_nrpages_setup(char *s)
2696 {
2697 unsigned long *mhp;
2698 static unsigned long *last_mhp;
2699
2700 if (!parsed_valid_hugepagesz) {
2701 pr_warn("hugepages = %s preceded by "
2702 "an unsupported hugepagesz, ignoring\n", s);
2703 parsed_valid_hugepagesz = true;
2704 return 1;
2705 }
2706 /*
2707 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2708 * so this hugepages= parameter goes to the "default hstate".
2709 */
2710 else if (!hugetlb_max_hstate)
2711 mhp = &default_hstate_max_huge_pages;
2712 else
2713 mhp = &parsed_hstate->max_huge_pages;
2714
2715 if (mhp == last_mhp) {
2716 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2717 return 1;
2718 }
2719
2720 if (sscanf(s, "%lu", mhp) <= 0)
2721 *mhp = 0;
2722
2723 /*
2724 * Global state is always initialized later in hugetlb_init.
2725 * But we need to allocate huge pages for hstates of order >= MAX_ORDER
2726 * here early to still be able to use the bootmem allocator.
2727 */
2728 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2729 hugetlb_hstate_alloc_pages(parsed_hstate);
2730
2731 last_mhp = mhp;
2732
2733 return 1;
2734 }
2735 __setup("hugepages=", hugetlb_nrpages_setup);
2736
2737 static int __init hugetlb_default_setup(char *s)
2738 {
2739 default_hstate_size = memparse(s, &s);
2740 return 1;
2741 }
2742 __setup("default_hugepagesz=", hugetlb_default_setup);
2743
2744 static unsigned int cpuset_mems_nr(unsigned int *array)
2745 {
2746 int node;
2747 unsigned int nr = 0;
2748
2749 for_each_node_mask(node, cpuset_current_mems_allowed)
2750 nr += array[node];
2751
2752 return nr;
2753 }
2754
2755 #ifdef CONFIG_SYSCTL
2756 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2757 struct ctl_table *table, int write,
2758 void __user *buffer, size_t *length, loff_t *ppos)
2759 {
2760 struct hstate *h = &default_hstate;
2761 unsigned long tmp = h->max_huge_pages;
2762 int ret;
2763
2764 if (!hugepages_supported())
2765 return -EOPNOTSUPP;
2766
2767 table->data = &tmp;
2768 table->maxlen = sizeof(unsigned long);
2769 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2770 if (ret)
2771 goto out;
2772
2773 if (write)
2774 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2775 NUMA_NO_NODE, tmp, *length);
2776 out:
2777 return ret;
2778 }
2779
2780 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2781 void __user *buffer, size_t *length, loff_t *ppos)
2782 {
2783
2784 return hugetlb_sysctl_handler_common(false, table, write,
2785 buffer, length, ppos);
2786 }
2787
2788 #ifdef CONFIG_NUMA
2789 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2790 void __user *buffer, size_t *length, loff_t *ppos)
2791 {
2792 return hugetlb_sysctl_handler_common(true, table, write,
2793 buffer, length, ppos);
2794 }
2795 #endif /* CONFIG_NUMA */
2796
2797 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2798 void __user *buffer,
2799 size_t *length, loff_t *ppos)
2800 {
2801 struct hstate *h = &default_hstate;
2802 unsigned long tmp;
2803 int ret;
2804
2805 if (!hugepages_supported())
2806 return -EOPNOTSUPP;
2807
2808 tmp = h->nr_overcommit_huge_pages;
2809
2810 if (write && hstate_is_gigantic(h))
2811 return -EINVAL;
2812
2813 table->data = &tmp;
2814 table->maxlen = sizeof(unsigned long);
2815 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2816 if (ret)
2817 goto out;
2818
2819 if (write) {
2820 spin_lock(&hugetlb_lock);
2821 h->nr_overcommit_huge_pages = tmp;
2822 spin_unlock(&hugetlb_lock);
2823 }
2824 out:
2825 return ret;
2826 }
2827
2828 #endif /* CONFIG_SYSCTL */
2829
2830 void hugetlb_report_meminfo(struct seq_file *m)
2831 {
2832 struct hstate *h = &default_hstate;
2833 if (!hugepages_supported())
2834 return;
2835 seq_printf(m,
2836 "HugePages_Total: %5lu\n"
2837 "HugePages_Free: %5lu\n"
2838 "HugePages_Rsvd: %5lu\n"
2839 "HugePages_Surp: %5lu\n"
2840 "Hugepagesize: %8lu kB\n",
2841 h->nr_huge_pages,
2842 h->free_huge_pages,
2843 h->resv_huge_pages,
2844 h->surplus_huge_pages,
2845 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2846 }
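/*
 * The block above appears in /proc/meminfo roughly as (illustrative
 * numbers for a 2 MB default hstate):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       32
 *	HugePages_Rsvd:        8
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */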
2847
2848 int hugetlb_report_node_meminfo(int nid, char *buf)
2849 {
2850 struct hstate *h = &default_hstate;
2851 if (!hugepages_supported())
2852 return 0;
2853 return sprintf(buf,
2854 "Node %d HugePages_Total: %5u\n"
2855 "Node %d HugePages_Free: %5u\n"
2856 "Node %d HugePages_Surp: %5u\n",
2857 nid, h->nr_huge_pages_node[nid],
2858 nid, h->free_huge_pages_node[nid],
2859 nid, h->surplus_huge_pages_node[nid]);
2860 }
2861
2862 void hugetlb_show_meminfo(void)
2863 {
2864 struct hstate *h;
2865 int nid;
2866
2867 if (!hugepages_supported())
2868 return;
2869
2870 for_each_node_state(nid, N_MEMORY)
2871 for_each_hstate(h)
2872 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2873 nid,
2874 h->nr_huge_pages_node[nid],
2875 h->free_huge_pages_node[nid],
2876 h->surplus_huge_pages_node[nid],
2877 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2878 }
2879
2880 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
2881 {
2882 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
2883 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
2884 }
2885
2886 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2887 unsigned long hugetlb_total_pages(void)
2888 {
2889 struct hstate *h;
2890 unsigned long nr_total_pages = 0;
2891
2892 for_each_hstate(h)
2893 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2894 return nr_total_pages;
2895 }
2896
2897 static int hugetlb_acct_memory(struct hstate *h, long delta)
2898 {
2899 int ret = -ENOMEM;
2900
2901 spin_lock(&hugetlb_lock);
2902 /*
2903 * When cpuset is configured, it breaks the strict hugetlb page
2904 * reservation as the accounting is done on a global variable. Such
2905 * reservation is completely rubbish in the presence of cpuset because
2906 * the reservation is not checked against page availability for the
2907 * current cpuset. The application can still be OOM'ed by the kernel
2908 * for lack of a free hugetlb page in the cpuset that the task is in.
2909 * Attempting to enforce strict accounting with cpuset is almost
2910 * impossible (or too ugly) because cpusets are so fluid that a
2911 * task or memory node can be dynamically moved between cpusets.
2912 *
2913 * The change of semantics for shared hugetlb mapping with cpuset is
2914 * undesirable. However, in order to preserve some of the semantics,
2915 * we fall back to check against current free page availability as
2916 * a best attempt and hopefully to minimize the impact of changing
2917 * semantics that cpuset has.
2918 */
2919 if (delta > 0) {
2920 if (gather_surplus_pages(h, delta) < 0)
2921 goto out;
2922
2923 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2924 return_unused_surplus_pages(h, delta);
2925 goto out;
2926 }
2927 }
2928
2929 ret = 0;
2930 if (delta < 0)
2931 return_unused_surplus_pages(h, (unsigned long) -delta);
2932
2933 out:
2934 spin_unlock(&hugetlb_lock);
2935 return ret;
2936 }
2937
2938 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2939 {
2940 struct resv_map *resv = vma_resv_map(vma);
2941
2942 /*
2943 * This new VMA should share its sibling's reservation map if present.
2944 * The VMA will only ever have a valid reservation map pointer where
2945 * it is being copied for another still existing VMA. As that VMA
2946 * has a reference to the reservation map it cannot disappear until
2947 * after this open call completes. It is therefore safe to take a
2948 * new reference here without additional locking.
2949 */
2950 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2951 kref_get(&resv->refs);
2952 }
2953
2954 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2955 {
2956 struct hstate *h = hstate_vma(vma);
2957 struct resv_map *resv = vma_resv_map(vma);
2958 struct hugepage_subpool *spool = subpool_vma(vma);
2959 unsigned long reserve, start, end;
2960 long gbl_reserve;
2961
2962 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2963 return;
2964
2965 start = vma_hugecache_offset(h, vma, vma->vm_start);
2966 end = vma_hugecache_offset(h, vma, vma->vm_end);
2967
2968 reserve = (end - start) - region_count(resv, start, end);
2969
2970 kref_put(&resv->refs, resv_map_release);
2971
2972 if (reserve) {
2973 /*
2974 * Decrement reserve counts. The global reserve count may be
2975 * adjusted if the subpool has a minimum size.
2976 */
2977 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2978 hugetlb_acct_memory(h, -gbl_reserve);
2979 }
2980 }
2981
2982 /*
2983 * We cannot handle pagefaults against hugetlb pages at all. They cause
2984 * handle_mm_fault() to try to instantiate regular-sized pages in the
2985 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
2986 * this far.
2987 */
2988 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2989 {
2990 BUG();
2991 return 0;
2992 }
2993
2994 const struct vm_operations_struct hugetlb_vm_ops = {
2995 .fault = hugetlb_vm_op_fault,
2996 .open = hugetlb_vm_op_open,
2997 .close = hugetlb_vm_op_close,
2998 };
2999
3000 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3001 int writable)
3002 {
3003 pte_t entry;
3004
3005 if (writable) {
3006 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3007 vma->vm_page_prot)));
3008 } else {
3009 entry = huge_pte_wrprotect(mk_huge_pte(page,
3010 vma->vm_page_prot));
3011 }
3012 entry = pte_mkyoung(entry);
3013 entry = pte_mkhuge(entry);
3014 entry = arch_make_huge_pte(entry, vma, page, writable);
3015
3016 return entry;
3017 }
3018
3019 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3020 unsigned long address, pte_t *ptep)
3021 {
3022 pte_t entry;
3023
3024 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3025 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3026 update_mmu_cache(vma, address, ptep);
3027 }
3028
3029 static int is_hugetlb_entry_migration(pte_t pte)
3030 {
3031 swp_entry_t swp;
3032
3033 if (huge_pte_none(pte) || pte_present(pte))
3034 return 0;
3035 swp = pte_to_swp_entry(pte);
3036 if (non_swap_entry(swp) && is_migration_entry(swp))
3037 return 1;
3038 else
3039 return 0;
3040 }
3041
3042 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3043 {
3044 swp_entry_t swp;
3045
3046 if (huge_pte_none(pte) || pte_present(pte))
3047 return 0;
3048 swp = pte_to_swp_entry(pte);
3049 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3050 return 1;
3051 else
3052 return 0;
3053 }
3054
3055 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3056 struct vm_area_struct *vma)
3057 {
3058 pte_t *src_pte, *dst_pte, entry;
3059 struct page *ptepage;
3060 unsigned long addr;
3061 int cow;
3062 struct hstate *h = hstate_vma(vma);
3063 unsigned long sz = huge_page_size(h);
3064 unsigned long mmun_start; /* For mmu_notifiers */
3065 unsigned long mmun_end; /* For mmu_notifiers */
3066 int ret = 0;
3067
3068 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3069
3070 mmun_start = vma->vm_start;
3071 mmun_end = vma->vm_end;
3072 if (cow)
3073 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3074
3075 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3076 spinlock_t *src_ptl, *dst_ptl;
3077 src_pte = huge_pte_offset(src, addr);
3078 if (!src_pte)
3079 continue;
3080 dst_pte = huge_pte_alloc(dst, addr, sz);
3081 if (!dst_pte) {
3082 ret = -ENOMEM;
3083 break;
3084 }
3085
3086 /* If the pagetables are shared don't copy or take references */
3087 if (dst_pte == src_pte)
3088 continue;
3089
3090 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3091 src_ptl = huge_pte_lockptr(h, src, src_pte);
3092 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3093 entry = huge_ptep_get(src_pte);
3094 if (huge_pte_none(entry)) { /* skip none entry */
3095 ;
3096 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3097 is_hugetlb_entry_hwpoisoned(entry))) {
3098 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3099
3100 if (is_write_migration_entry(swp_entry) && cow) {
3101 /*
3102 * COW mappings require pages in both
3103 * parent and child to be set to read.
3104 */
3105 make_migration_entry_read(&swp_entry);
3106 entry = swp_entry_to_pte(swp_entry);
3107 set_huge_pte_at(src, addr, src_pte, entry);
3108 }
3109 set_huge_pte_at(dst, addr, dst_pte, entry);
3110 } else {
3111 if (cow) {
3112 huge_ptep_set_wrprotect(src, addr, src_pte);
3113 mmu_notifier_invalidate_range(src, mmun_start,
3114 mmun_end);
3115 }
3116 entry = huge_ptep_get(src_pte);
3117 ptepage = pte_page(entry);
3118 get_page(ptepage);
3119 page_dup_rmap(ptepage, true);
3120 set_huge_pte_at(dst, addr, dst_pte, entry);
3121 hugetlb_count_add(pages_per_huge_page(h), dst);
3122 }
3123 spin_unlock(src_ptl);
3124 spin_unlock(dst_ptl);
3125 }
3126
3127 if (cow)
3128 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3129
3130 return ret;
3131 }
3132
3133 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3134 unsigned long start, unsigned long end,
3135 struct page *ref_page)
3136 {
3137 int force_flush = 0;
3138 struct mm_struct *mm = vma->vm_mm;
3139 unsigned long address;
3140 pte_t *ptep;
3141 pte_t pte;
3142 spinlock_t *ptl;
3143 struct page *page;
3144 struct hstate *h = hstate_vma(vma);
3145 unsigned long sz = huge_page_size(h);
3146 const unsigned long mmun_start = start; /* For mmu_notifiers */
3147 const unsigned long mmun_end = end; /* For mmu_notifiers */
3148
3149 WARN_ON(!is_vm_hugetlb_page(vma));
3150 BUG_ON(start & ~huge_page_mask(h));
3151 BUG_ON(end & ~huge_page_mask(h));
3152
3153 tlb_start_vma(tlb, vma);
3154 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3155 address = start;
3156 again:
3157 for (; address < end; address += sz) {
3158 ptep = huge_pte_offset(mm, address);
3159 if (!ptep)
3160 continue;
3161
3162 ptl = huge_pte_lock(h, mm, ptep);
3163 if (huge_pmd_unshare(mm, &address, ptep))
3164 goto unlock;
3165
3166 pte = huge_ptep_get(ptep);
3167 if (huge_pte_none(pte))
3168 goto unlock;
3169
3170 /*
3171 * A migrating or HWPoisoned hugepage is already
3172 * unmapped and its refcount is dropped, so just clear the pte here.
3173 */
3174 if (unlikely(!pte_present(pte))) {
3175 huge_pte_clear(mm, address, ptep);
3176 goto unlock;
3177 }
3178
3179 page = pte_page(pte);
3180 /*
3181 * If a reference page is supplied, it is because a specific
3182 * page is being unmapped, not a range. Ensure the page we
3183 * are about to unmap is the actual page of interest.
3184 */
3185 if (ref_page) {
3186 if (page != ref_page)
3187 goto unlock;
3188
3189 /*
3190 * Mark the VMA as having unmapped its page so that
3191 * future faults in this VMA will fail rather than
3192 * looking like data was lost
3193 */
3194 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3195 }
3196
3197 pte = huge_ptep_get_and_clear(mm, address, ptep);
3198 tlb_remove_tlb_entry(tlb, ptep, address);
3199 if (huge_pte_dirty(pte))
3200 set_page_dirty(page);
3201
3202 hugetlb_count_sub(pages_per_huge_page(h), mm);
3203 page_remove_rmap(page, true);
3204 force_flush = !__tlb_remove_page(tlb, page);
3205 if (force_flush) {
3206 address += sz;
3207 spin_unlock(ptl);
3208 break;
3209 }
3210 /* Bail out after unmapping reference page if supplied */
3211 if (ref_page) {
3212 spin_unlock(ptl);
3213 break;
3214 }
3215 unlock:
3216 spin_unlock(ptl);
3217 }
3218 /*
3219 * mmu_gather ran out of room to batch pages, we break out of
3220 * the PTE lock to avoid doing the potentially expensive TLB invalidate
3221 * and page-free while holding it.
3222 */
3223 if (force_flush) {
3224 force_flush = 0;
3225 tlb_flush_mmu(tlb);
3226 if (address < end && !ref_page)
3227 goto again;
3228 }
3229 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3230 tlb_end_vma(tlb, vma);
3231 }
3232
3233 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3234 struct vm_area_struct *vma, unsigned long start,
3235 unsigned long end, struct page *ref_page)
3236 {
3237 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3238
3239 /*
3240 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3241 * test will fail on a vma being torn down, and not grab a page table
3242 * on its way out. We're lucky that the flag has such an appropriate
3243 * name, and can in fact be safely cleared here. We could clear it
3244 * before the __unmap_hugepage_range above, but all that's necessary
3245 * is to clear it before releasing the i_mmap_rwsem. This works
3246 * because in the context this is called, the VMA is about to be
3247 * destroyed and the i_mmap_rwsem is held.
3248 */
3249 vma->vm_flags &= ~VM_MAYSHARE;
3250 }
3251
3252 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3253 unsigned long end, struct page *ref_page)
3254 {
3255 struct mm_struct *mm;
3256 struct mmu_gather tlb;
3257
3258 mm = vma->vm_mm;
3259
3260 tlb_gather_mmu(&tlb, mm, start, end);
3261 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3262 tlb_finish_mmu(&tlb, start, end);
3263 }
3264
3265 /*
3266 * This is called when the original mapper is failing to COW a MAP_PRIVATE
3267 * mapping it owns the reserve page for. The intention is to unmap the page
3268 * from other VMAs and let the children be SIGKILLed if they are faulting the
3269 * same region.
3270 */
3271 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3272 struct page *page, unsigned long address)
3273 {
3274 struct hstate *h = hstate_vma(vma);
3275 struct vm_area_struct *iter_vma;
3276 struct address_space *mapping;
3277 pgoff_t pgoff;
3278
3279 /*
3280 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3281 * from page cache lookup which is in HPAGE_SIZE units.
3282 */
3283 address = address & huge_page_mask(h);
3284 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3285 vma->vm_pgoff;
3286 mapping = file_inode(vma->vm_file)->i_mapping;
3287
3288 /*
3289 * Take the mapping lock for the duration of the table walk. As
3290 * this mapping should be shared between all the VMAs,
3291 * __unmap_hugepage_range() is called as the lock is already held
3292 */
3293 i_mmap_lock_write(mapping);
3294 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3295 /* Do not unmap the current VMA */
3296 if (iter_vma == vma)
3297 continue;
3298
3299 /*
3300 * Shared VMAs have their own reserves and do not affect
3301 * MAP_PRIVATE accounting but it is possible that a shared
3302 * VMA is using the same page so check and skip such VMAs.
3303 */
3304 if (iter_vma->vm_flags & VM_MAYSHARE)
3305 continue;
3306
3307 /*
3308 * Unmap the page from other VMAs without their own reserves.
3309 * They get marked to be SIGKILLed if they fault in these
3310 * areas. This is because a future no-page fault on this VMA
3311 * could insert a zeroed page instead of the data existing
3312 * from the time of fork. This would look like data corruption
3313 */
3314 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3315 unmap_hugepage_range(iter_vma, address,
3316 address + huge_page_size(h), page);
3317 }
3318 i_mmap_unlock_write(mapping);
3319 }
3320
3321 /*
3322 * hugetlb_cow() should be called with the page lock of the original hugepage held.
3323 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3324 * cannot race with other handlers or page migration.
3325 * Keep the pte_same checks anyway to make transition from the mutex easier.
3326 */
3327 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3328 unsigned long address, pte_t *ptep, pte_t pte,
3329 struct page *pagecache_page, spinlock_t *ptl)
3330 {
3331 struct hstate *h = hstate_vma(vma);
3332 struct page *old_page, *new_page;
3333 int ret = 0, outside_reserve = 0;
3334 unsigned long mmun_start; /* For mmu_notifiers */
3335 unsigned long mmun_end; /* For mmu_notifiers */
3336
3337 old_page = pte_page(pte);
3338
3339 retry_avoidcopy:
3340 /* If no-one else is actually using this page, avoid the copy
3341 * and just make the page writable */
3342 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3343 page_move_anon_rmap(old_page, vma, address);
3344 set_huge_ptep_writable(vma, address, ptep);
3345 return 0;
3346 }
3347
3348 /*
3349 * If the process that created a MAP_PRIVATE mapping is about to
3350 * perform a COW due to a shared page count, attempt to satisfy
3351 * the allocation without using the existing reserves. The pagecache
3352 * page is used to determine if the reserve at this address was
3353 * consumed or not. If reserves were used, a partial faulted mapping
3354 * at the time of fork() could consume its reserves on COW instead
3355 * of the full address range.
3356 */
3357 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3358 old_page != pagecache_page)
3359 outside_reserve = 1;
3360
3361 get_page(old_page);
3362
3363 /*
3364 * Drop page table lock as buddy allocator may be called. It will
3365 * be acquired again before returning to the caller, as expected.
3366 */
3367 spin_unlock(ptl);
3368 new_page = alloc_huge_page(vma, address, outside_reserve);
3369
3370 if (IS_ERR(new_page)) {
3371 /*
3372 * If a process owning a MAP_PRIVATE mapping fails to COW,
3373 * it is due to references held by a child and an insufficient
3374 * huge page pool. To guarantee the original mapper's
3375 * reliability, unmap the page from child processes. The child
3376 * may get SIGKILLed if it later faults.
3377 */
3378 if (outside_reserve) {
3379 put_page(old_page);
3380 BUG_ON(huge_pte_none(pte));
3381 unmap_ref_private(mm, vma, old_page, address);
3382 BUG_ON(huge_pte_none(pte));
3383 spin_lock(ptl);
3384 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3385 if (likely(ptep &&
3386 pte_same(huge_ptep_get(ptep), pte)))
3387 goto retry_avoidcopy;
3388 /*
3389 * race occurs while re-acquiring page table
3390 * lock, and our job is done.
3391 */
3392 return 0;
3393 }
3394
3395 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3396 VM_FAULT_OOM : VM_FAULT_SIGBUS;
3397 goto out_release_old;
3398 }
3399
3400 /*
3401 * When the original hugepage is shared one, it does not have
3402 * anon_vma prepared.
3403 */
3404 if (unlikely(anon_vma_prepare(vma))) {
3405 ret = VM_FAULT_OOM;
3406 goto out_release_all;
3407 }
3408
3409 copy_user_huge_page(new_page, old_page, address, vma,
3410 pages_per_huge_page(h));
3411 __SetPageUptodate(new_page);
3412 set_page_huge_active(new_page);
3413
3414 mmun_start = address & huge_page_mask(h);
3415 mmun_end = mmun_start + huge_page_size(h);
3416 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3417
3418 /*
3419 * Retake the page table lock to check for racing updates
3420 * before the page tables are altered
3421 */
3422 spin_lock(ptl);
3423 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3424 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3425 ClearPagePrivate(new_page);
3426
3427 /* Break COW */
3428 huge_ptep_clear_flush(vma, address, ptep);
3429 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3430 set_huge_pte_at(mm, address, ptep,
3431 make_huge_pte(vma, new_page, 1));
3432 page_remove_rmap(old_page, true);
3433 hugepage_add_new_anon_rmap(new_page, vma, address);
3434 /* Make the old page be freed below */
3435 new_page = old_page;
3436 }
3437 spin_unlock(ptl);
3438 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3439 out_release_all:
3440 put_page(new_page);
3441 out_release_old:
3442 put_page(old_page);
3443
3444 spin_lock(ptl); /* Caller expects lock to be held */
3445 return ret;
3446 }
3447
3448 /* Return the pagecache page at a given address within a VMA */
3449 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3450 struct vm_area_struct *vma, unsigned long address)
3451 {
3452 struct address_space *mapping;
3453 pgoff_t idx;
3454
3455 mapping = vma->vm_file->f_mapping;
3456 idx = vma_hugecache_offset(h, vma, address);
3457
3458 return find_lock_page(mapping, idx);
3459 }
3460
3461 /*
3462 * Return whether there is a pagecache page to back given address within VMA.
3463 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3464 */
3465 static bool hugetlbfs_pagecache_present(struct hstate *h,
3466 struct vm_area_struct *vma, unsigned long address)
3467 {
3468 struct address_space *mapping;
3469 pgoff_t idx;
3470 struct page *page;
3471
3472 mapping = vma->vm_file->f_mapping;
3473 idx = vma_hugecache_offset(h, vma, address);
3474
3475 page = find_get_page(mapping, idx);
3476 if (page)
3477 put_page(page);
3478 return page != NULL;
3479 }
3480
3481 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3482 pgoff_t idx)
3483 {
3484 struct inode *inode = mapping->host;
3485 struct hstate *h = hstate_inode(inode);
3486 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3487
3488 if (err)
3489 return err;
3490 ClearPagePrivate(page);
3491
3492 spin_lock(&inode->i_lock);
3493 inode->i_blocks += blocks_per_huge_page(h);
3494 spin_unlock(&inode->i_lock);
3495 return 0;
3496 }
3497
3498 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3499 struct address_space *mapping, pgoff_t idx,
3500 unsigned long address, pte_t *ptep, unsigned int flags)
3501 {
3502 struct hstate *h = hstate_vma(vma);
3503 int ret = VM_FAULT_SIGBUS;
3504 int anon_rmap = 0;
3505 unsigned long size;
3506 struct page *page;
3507 pte_t new_pte;
3508 spinlock_t *ptl;
3509
3510 /*
3511 * Currently, we are forced to kill the process in the event the
3512 * original mapper has unmapped pages from the child due to a failed
3513 * COW. Warn that such a situation has occurred as it may not be obvious
3514 */
3515 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3516 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3517 current->pid);
3518 return ret;
3519 }
3520
3521 /*
3522 * Use page lock to guard against racing truncation
3523 * before we get page_table_lock.
3524 */
3525 retry:
3526 page = find_lock_page(mapping, idx);
3527 if (!page) {
3528 size = i_size_read(mapping->host) >> huge_page_shift(h);
3529 if (idx >= size)
3530 goto out;
3531 page = alloc_huge_page(vma, address, 0);
3532 if (IS_ERR(page)) {
3533 ret = PTR_ERR(page);
3534 if (ret == -ENOMEM)
3535 ret = VM_FAULT_OOM;
3536 else
3537 ret = VM_FAULT_SIGBUS;
3538 goto out;
3539 }
3540 clear_huge_page(page, address, pages_per_huge_page(h));
3541 __SetPageUptodate(page);
3542 set_page_huge_active(page);
3543
3544 if (vma->vm_flags & VM_MAYSHARE) {
3545 int err = huge_add_to_page_cache(page, mapping, idx);
3546 if (err) {
3547 put_page(page);
3548 if (err == -EEXIST)
3549 goto retry;
3550 goto out;
3551 }
3552 } else {
3553 lock_page(page);
3554 if (unlikely(anon_vma_prepare(vma))) {
3555 ret = VM_FAULT_OOM;
3556 goto backout_unlocked;
3557 }
3558 anon_rmap = 1;
3559 }
3560 } else {
3561 /*
3562 * If a memory error occurs between mmap() and fault, some processes
3563 * won't have a hwpoisoned swap entry for the errored virtual address.
3564 * So we need to block hugepage fault by PG_hwpoison bit check.
3565 */
3566 if (unlikely(PageHWPoison(page))) {
3567 ret = VM_FAULT_HWPOISON |
3568 VM_FAULT_SET_HINDEX(hstate_index(h));
3569 goto backout_unlocked;
3570 }
3571 }
3572
3573 /*
3574 * If we are going to COW a private mapping later, we examine the
3575 * pending reservations for this page now. This will ensure that
3576 * any allocations necessary to record that reservation occur outside
3577 * the spinlock.
3578 */
3579 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3580 if (vma_needs_reservation(h, vma, address) < 0) {
3581 ret = VM_FAULT_OOM;
3582 goto backout_unlocked;
3583 }
3584 /* Just decrements count, does not deallocate */
3585 vma_end_reservation(h, vma, address);
3586 }
3587
3588 ptl = huge_pte_lockptr(h, mm, ptep);
3589 spin_lock(ptl);
3590 size = i_size_read(mapping->host) >> huge_page_shift(h);
3591 if (idx >= size)
3592 goto backout;
3593
3594 ret = 0;
3595 if (!huge_pte_none(huge_ptep_get(ptep)))
3596 goto backout;
3597
3598 if (anon_rmap) {
3599 ClearPagePrivate(page);
3600 hugepage_add_new_anon_rmap(page, vma, address);
3601 } else
3602 page_dup_rmap(page, true);
3603 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3604 && (vma->vm_flags & VM_SHARED)));
3605 set_huge_pte_at(mm, address, ptep, new_pte);
3606
3607 hugetlb_count_add(pages_per_huge_page(h), mm);
3608 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3609 /* Optimization, do the COW without a second fault */
3610 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3611 }
3612
3613 spin_unlock(ptl);
3614 unlock_page(page);
3615 out:
3616 return ret;
3617
3618 backout:
3619 spin_unlock(ptl);
3620 backout_unlocked:
3621 unlock_page(page);
3622 put_page(page);
3623 goto out;
3624 }
3625
3626 #ifdef CONFIG_SMP
3627 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3628 struct vm_area_struct *vma,
3629 struct address_space *mapping,
3630 pgoff_t idx, unsigned long address)
3631 {
3632 unsigned long key[2];
3633 u32 hash;
3634
3635 if (vma->vm_flags & VM_SHARED) {
3636 key[0] = (unsigned long) mapping;
3637 key[1] = idx;
3638 } else {
3639 key[0] = (unsigned long) mm;
3640 key[1] = address >> huge_page_shift(h);
3641 }
3642
3643 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3644
3645 return hash & (num_fault_mutexes - 1);
3646 }
3647 #else
3648 /*
3649 * For uniprocessor systems we always use a single mutex, so just
3650 * return 0 and avoid the hashing overhead.
3651 */
3652 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3653 struct vm_area_struct *vma,
3654 struct address_space *mapping,
3655 pgoff_t idx, unsigned long address)
3656 {
3657 return 0;
3658 }
3659 #endif
3660
3661 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3662 unsigned long address, unsigned int flags)
3663 {
3664 pte_t *ptep, entry;
3665 spinlock_t *ptl;
3666 int ret;
3667 u32 hash;
3668 pgoff_t idx;
3669 struct page *page = NULL;
3670 struct page *pagecache_page = NULL;
3671 struct hstate *h = hstate_vma(vma);
3672 struct address_space *mapping;
3673 int need_wait_lock = 0;
3674
3675 address &= huge_page_mask(h);
3676
3677 ptep = huge_pte_offset(mm, address);
3678 if (ptep) {
3679 entry = huge_ptep_get(ptep);
3680 if (unlikely(is_hugetlb_entry_migration(entry))) {
3681 migration_entry_wait_huge(vma, mm, ptep);
3682 return 0;
3683 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3684 return VM_FAULT_HWPOISON_LARGE |
3685 VM_FAULT_SET_HINDEX(hstate_index(h));
3686 } else {
3687 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3688 if (!ptep)
3689 return VM_FAULT_OOM;
3690 }
3691
3692 mapping = vma->vm_file->f_mapping;
3693 idx = vma_hugecache_offset(h, vma, address);
3694
3695 /*
3696 * Serialize hugepage allocation and instantiation, so that we don't
3697 * get spurious allocation failures if two CPUs race to instantiate
3698 * the same page in the page cache.
3699 */
3700 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3701 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3702
3703 entry = huge_ptep_get(ptep);
3704 if (huge_pte_none(entry)) {
3705 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3706 goto out_mutex;
3707 }
3708
3709 ret = 0;
3710
3711 /*
3712 * entry could be a migration/hwpoison entry at this point, so this
3713 * check prevents the kernel from proceeding below on the assumption
3714 * that we have an active hugepage in the pagecache. This goto expects
3715 * the 2nd page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
3716 * check will handle it properly.
3717 */
3718 if (!pte_present(entry))
3719 goto out_mutex;
3720
3721 /*
3722 * If we are going to COW the mapping later, we examine the pending
3723 * reservations for this page now. This will ensure that any
3724 * allocations necessary to record that reservation occur outside the
3725 * spinlock. For private mappings, we also look up the pagecache
3726 * page now as it is used to determine if a reservation has been
3727 * consumed.
3728 */
3729 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3730 if (vma_needs_reservation(h, vma, address) < 0) {
3731 ret = VM_FAULT_OOM;
3732 goto out_mutex;
3733 }
3734 /* Just decrements count, does not deallocate */
3735 vma_end_reservation(h, vma, address);
3736
3737 if (!(vma->vm_flags & VM_MAYSHARE))
3738 pagecache_page = hugetlbfs_pagecache_page(h,
3739 vma, address);
3740 }
3741
3742 ptl = huge_pte_lock(h, mm, ptep);
3743
3744 /* Check for a racing update before calling hugetlb_cow */
3745 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3746 goto out_ptl;
3747
3748 /*
3749 * hugetlb_cow() requires page locks of pte_page(entry) and
3750 * pagecache_page, so here we need to take the former one
3751 * when page != pagecache_page or !pagecache_page.
3752 */
3753 page = pte_page(entry);
3754 if (page != pagecache_page)
3755 if (!trylock_page(page)) {
3756 need_wait_lock = 1;
3757 goto out_ptl;
3758 }
3759
3760 get_page(page);
3761
3762 if (flags & FAULT_FLAG_WRITE) {
3763 if (!huge_pte_write(entry)) {
3764 ret = hugetlb_cow(mm, vma, address, ptep, entry,
3765 pagecache_page, ptl);
3766 goto out_put_page;
3767 }
3768 entry = huge_pte_mkdirty(entry);
3769 }
3770 entry = pte_mkyoung(entry);
3771 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3772 flags & FAULT_FLAG_WRITE))
3773 update_mmu_cache(vma, address, ptep);
3774 out_put_page:
3775 if (page != pagecache_page)
3776 unlock_page(page);
3777 put_page(page);
3778 out_ptl:
3779 spin_unlock(ptl);
3780
3781 if (pagecache_page) {
3782 unlock_page(pagecache_page);
3783 put_page(pagecache_page);
3784 }
3785 out_mutex:
3786 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3787 /*
3788 * Generally it is safe to hold a refcount while waiting on a page lock.
3789 * Here, however, we wait only to defer the next page fault and avoid a
3790 * busy loop; the page is not used between being unlocked and the return
3791 * from the current page fault, so we cannot touch a freed page even
3792 * though we wait without taking a refcount.
3793 */
3794 if (need_wait_lock)
3795 wait_on_page_locked(page);
3796 return ret;
3797 }
3798
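/*
 * hugetlb backend for get_user_pages(): walk [*position, vma->vm_end),
 * faulting pages in as needed, and fill pages[]/vmas[] starting at slot i.
 * On return *position and *nr_pages reflect the progress made; the return
 * value is the updated page count i, or -EFAULT if it is still zero.
 */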
3799 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3800 struct page **pages, struct vm_area_struct **vmas,
3801 unsigned long *position, unsigned long *nr_pages,
3802 long i, unsigned int flags)
3803 {
3804 unsigned long pfn_offset;
3805 unsigned long vaddr = *position;
3806 unsigned long remainder = *nr_pages;
3807 struct hstate *h = hstate_vma(vma);
3808
3809 while (vaddr < vma->vm_end && remainder) {
3810 pte_t *pte;
3811 spinlock_t *ptl = NULL;
3812 int absent;
3813 struct page *page;
3814
3815 /*
3816 * If we have a pending SIGKILL, don't keep faulting pages and
3817 * potentially allocating memory.
3818 */
3819 if (unlikely(fatal_signal_pending(current))) {
3820 remainder = 0;
3821 break;
3822 }
3823
3824 /*
3825 * Some archs (sparc64, sh*) have multiple pte_ts to
3826 * each hugepage. We have to make sure we get the
3827 * first, for the page indexing below to work.
3828 *
3829 * Note that page table lock is not held when pte is null.
3830 */
3831 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3832 if (pte)
3833 ptl = huge_pte_lock(h, mm, pte);
3834 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3835
3836 /*
3837 * When coredumping, it suits get_dump_page if we just return
3838 * an error where there's an empty slot with no huge pagecache
3839 * to back it. This way, we avoid allocating a hugepage, and
3840 * the sparse dumpfile avoids allocating disk blocks, but its
3841 * huge holes still show up with zeroes where they need to be.
3842 */
3843 if (absent && (flags & FOLL_DUMP) &&
3844 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3845 if (pte)
3846 spin_unlock(ptl);
3847 remainder = 0;
3848 break;
3849 }
3850
3851 /*
3852 * We need to call hugetlb_fault() for both hugepages under migration
3853 * (in which case hugetlb_fault() waits for the migration) and
3854 * hwpoisoned hugepages (in which case we need to prevent the
3855 * caller from accessing them). In order to do this, we check
3856 * is_swap_pte() here instead of is_hugetlb_entry_migration() and
3857 * is_hugetlb_entry_hwpoisoned(). This is because it simply covers
3858 * both cases, and because we can't follow correct pages
3859 * directly from any kind of swap entry.
3860 */
3861 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3862 ((flags & FOLL_WRITE) &&
3863 !huge_pte_write(huge_ptep_get(pte)))) {
3864 int ret;
3865
3866 if (pte)
3867 spin_unlock(ptl);
3868 ret = hugetlb_fault(mm, vma, vaddr,
3869 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3870 if (!(ret & VM_FAULT_ERROR))
3871 continue;
3872
3873 remainder = 0;
3874 break;
3875 }
3876
3877 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3878 page = pte_page(huge_ptep_get(pte));
3879 same_page:
3880 if (pages) {
3881 pages[i] = mem_map_offset(page, pfn_offset);
3882 get_page(pages[i]);
3883 }
3884
3885 if (vmas)
3886 vmas[i] = vma;
3887
3888 vaddr += PAGE_SIZE;
3889 ++pfn_offset;
3890 --remainder;
3891 ++i;
3892 if (vaddr < vma->vm_end && remainder &&
3893 pfn_offset < pages_per_huge_page(h)) {
3894 /*
3895 * We use pfn_offset to avoid touching the pageframes
3896 * of this compound page.
3897 */
3898 goto same_page;
3899 }
3900 spin_unlock(ptl);
3901 }
3902 *nr_pages = remainder;
3903 *position = vaddr;
3904
3905 return i ? i : -EFAULT;
3906 }
3907
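/*
 * hugetlb counterpart of change_protection(): rewrite the protection bits
 * of every huge pte in [address, end). Returns the number of base (small)
 * pages whose mapping was updated.
 */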
3908 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3909 unsigned long address, unsigned long end, pgprot_t newprot)
3910 {
3911 struct mm_struct *mm = vma->vm_mm;
3912 unsigned long start = address;
3913 pte_t *ptep;
3914 pte_t pte;
3915 struct hstate *h = hstate_vma(vma);
3916 unsigned long pages = 0;
3917
3918 BUG_ON(address >= end);
3919 flush_cache_range(vma, address, end);
3920
3921 mmu_notifier_invalidate_range_start(mm, start, end);
3922 i_mmap_lock_write(vma->vm_file->f_mapping);
3923 for (; address < end; address += huge_page_size(h)) {
3924 spinlock_t *ptl;
3925 ptep = huge_pte_offset(mm, address);
3926 if (!ptep)
3927 continue;
3928 ptl = huge_pte_lock(h, mm, ptep);
3929 if (huge_pmd_unshare(mm, &address, ptep)) {
3930 pages++;
3931 spin_unlock(ptl);
3932 continue;
3933 }
3934 pte = huge_ptep_get(ptep);
3935 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3936 spin_unlock(ptl);
3937 continue;
3938 }
3939 if (unlikely(is_hugetlb_entry_migration(pte))) {
3940 swp_entry_t entry = pte_to_swp_entry(pte);
3941
3942 if (is_write_migration_entry(entry)) {
3943 pte_t newpte;
3944
3945 make_migration_entry_read(&entry);
3946 newpte = swp_entry_to_pte(entry);
3947 set_huge_pte_at(mm, address, ptep, newpte);
3948 pages++;
3949 }
3950 spin_unlock(ptl);
3951 continue;
3952 }
3953 if (!huge_pte_none(pte)) {
3954 pte = huge_ptep_get_and_clear(mm, address, ptep);
3955 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3956 pte = arch_make_huge_pte(pte, vma, NULL, 0);
3957 set_huge_pte_at(mm, address, ptep, pte);
3958 pages++;
3959 }
3960 spin_unlock(ptl);
3961 }
3962 /*
3963 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3964 * may have cleared our pud entry and done put_page on the page table:
3965 * once we release i_mmap_rwsem, another task can do the final put_page
3966 * and that page table be reused and filled with junk.
3967 */
3968 flush_tlb_range(vma, start, end);
3969 mmu_notifier_invalidate_range(mm, start, end);
3970 i_mmap_unlock_write(vma->vm_file->f_mapping);
3971 mmu_notifier_invalidate_range_end(mm, start, end);
3972
3973 return pages << h->order;
3974 }
3975
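/*
 * Reserve huge pages for the range [from, to) at mmap()/hugetlbfs setup
 * time so that later faults do not fail for lack of pages. The reservation
 * is charged against the inode's subpool and the global pool, and recorded
 * in the reserve map (shared mappings) or the VMA's private reserve map.
 */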
3976 int hugetlb_reserve_pages(struct inode *inode,
3977 long from, long to,
3978 struct vm_area_struct *vma,
3979 vm_flags_t vm_flags)
3980 {
3981 long ret, chg;
3982 struct hstate *h = hstate_inode(inode);
3983 struct hugepage_subpool *spool = subpool_inode(inode);
3984 struct resv_map *resv_map;
3985 long gbl_reserve;
3986
3987 /*
3988 * Only apply hugepage reservation if asked. At fault time, an
3989 * attempt will be made for VM_NORESERVE to allocate a page
3990 * without using reserves
3991 */
3992 if (vm_flags & VM_NORESERVE)
3993 return 0;
3994
3995 /*
3996 * Shared mappings base their reservation on the number of pages that
3997 * are already allocated on behalf of the file. Private mappings need
3998 * to reserve the full area even if read-only as mprotect() may be
3999 * called to make the mapping read-write. Assume !vma is a shm mapping
4000 */
4001 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4002 resv_map = inode_resv_map(inode);
4003
4004 chg = region_chg(resv_map, from, to);
4005
4006 } else {
4007 resv_map = resv_map_alloc();
4008 if (!resv_map)
4009 return -ENOMEM;
4010
4011 chg = to - from;
4012
4013 set_vma_resv_map(vma, resv_map);
4014 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4015 }
4016
4017 if (chg < 0) {
4018 ret = chg;
4019 goto out_err;
4020 }
4021
4022 /*
4023 * There must be enough pages in the subpool for the mapping. If
4024 * the subpool has a minimum size, there may be some global
4025 * reservations already in place (gbl_reserve).
4026 */
4027 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4028 if (gbl_reserve < 0) {
4029 ret = -ENOSPC;
4030 goto out_err;
4031 }
4032
4033 /*
4034 * Check that enough hugepages are available for the reservation.
4035 * Hand the pages back to the subpool if there are not enough.
4036 */
4037 ret = hugetlb_acct_memory(h, gbl_reserve);
4038 if (ret < 0) {
4039 /* put back original number of pages, chg */
4040 (void)hugepage_subpool_put_pages(spool, chg);
4041 goto out_err;
4042 }
4043
4044 /*
4045 * Account for the reservations made. Shared mappings record regions
4046 * that have reservations as they are shared by multiple VMAs.
4047 * When the last VMA disappears, the region map says how much
4048 * the reservation was and the page cache tells how much of
4049 * the reservation was consumed. Private mappings are per-VMA and
4050 * only the consumed reservations are tracked. When the VMA
4051 * disappears, the original reservation is the VMA size and the
4052 * consumed reservations are stored in the map. Hence, nothing
4053 * else has to be done for private mappings here
4054 */
4055 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4056 long add = region_add(resv_map, from, to);
4057
4058 if (unlikely(chg > add)) {
4059 /*
4060 * pages in this range were added to the reserve
4061 * map between region_chg and region_add. This
4062 * indicates a race with alloc_huge_page. Adjust
4063 * the subpool and reserve counts modified above
4064 * based on the difference.
4065 */
4066 long rsv_adjust;
4067
4068 rsv_adjust = hugepage_subpool_put_pages(spool,
4069 chg - add);
4070 hugetlb_acct_memory(h, -rsv_adjust);
4071 }
4072 }
4073 return 0;
4074 out_err:
4075 if (!vma || vma->vm_flags & VM_MAYSHARE)
4076 region_abort(resv_map, from, to);
4077 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4078 kref_put(&resv_map->refs, resv_map_release);
4079 return ret;
4080 }
4081
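/*
 * Release reservations for [start, end), typically on truncate or unlink.
 * 'freed' is the number of pages actually removed from the page cache; the
 * difference between the region map change and 'freed' is handed back to
 * the subpool and the global reserve.
 */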
4082 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4083 long freed)
4084 {
4085 struct hstate *h = hstate_inode(inode);
4086 struct resv_map *resv_map = inode_resv_map(inode);
4087 long chg = 0;
4088 struct hugepage_subpool *spool = subpool_inode(inode);
4089 long gbl_reserve;
4090
4091 if (resv_map) {
4092 chg = region_del(resv_map, start, end);
4093 /*
4094 * region_del() can fail in the rare case where a region
4095 * must be split and another region descriptor can not be
4096 * allocated. If end == LONG_MAX, it will not fail.
4097 */
4098 if (chg < 0)
4099 return chg;
4100 }
4101
4102 spin_lock(&inode->i_lock);
4103 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4104 spin_unlock(&inode->i_lock);
4105
4106 /*
4107 * If the subpool has a minimum size, the number of global
4108 * reservations to be released may be adjusted.
4109 */
4110 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4111 hugetlb_acct_memory(h, -gbl_reserve);
4112
4113 return 0;
4114 }
4115
4116 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
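/*
 * Check whether 'svma' could share its PMD page table with 'vma' for the
 * PUD range covering 'addr': same offset within the PUD, compatible flags,
 * and the whole PUD range inside 'svma'. Returns the matching address in
 * 'svma', or 0 if sharing is not possible.
 */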
4117 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4118 struct vm_area_struct *vma,
4119 unsigned long addr, pgoff_t idx)
4120 {
4121 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4122 svma->vm_start;
4123 unsigned long sbase = saddr & PUD_MASK;
4124 unsigned long s_end = sbase + PUD_SIZE;
4125
4126 /* Allow segments to share if only one is marked locked */
4127 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4128 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4129
4130 /*
4131 * match the virtual addresses, permission and the alignment of the
4132 * page table page.
4133 */
4134 if (pmd_index(addr) != pmd_index(saddr) ||
4135 vm_flags != svm_flags ||
4136 sbase < svma->vm_start || svma->vm_end < s_end)
4137 return 0;
4138
4139 return saddr;
4140 }
4141
4142 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4143 {
4144 unsigned long base = addr & PUD_MASK;
4145 unsigned long end = base + PUD_SIZE;
4146
4147 /*
4148 * check on proper vm_flags and page table alignment
4149 */
4150 if (vma->vm_flags & VM_MAYSHARE &&
4151 vma->vm_start <= base && end <= vma->vm_end)
4152 return true;
4153 return false;
4154 }
4155
4156 /*
4157 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4158 * and returns the corresponding pte. While this is not necessary for the
4159 * !shared pmd case because we can allocate the pmd later as well, it makes the
4160 * code much cleaner. pmd allocation is essential for the shared case because
4161 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4162 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4163 * bad pmd for sharing.
4164 */
4165 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4166 {
4167 struct vm_area_struct *vma = find_vma(mm, addr);
4168 struct address_space *mapping = vma->vm_file->f_mapping;
4169 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4170 vma->vm_pgoff;
4171 struct vm_area_struct *svma;
4172 unsigned long saddr;
4173 pte_t *spte = NULL;
4174 pte_t *pte;
4175 spinlock_t *ptl;
4176
4177 if (!vma_shareable(vma, addr))
4178 return (pte_t *)pmd_alloc(mm, pud, addr);
4179
4180 i_mmap_lock_write(mapping);
4181 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4182 if (svma == vma)
4183 continue;
4184
4185 saddr = page_table_shareable(svma, vma, addr, idx);
4186 if (saddr) {
4187 spte = huge_pte_offset(svma->vm_mm, saddr);
4188 if (spte) {
4189 mm_inc_nr_pmds(mm);
4190 get_page(virt_to_page(spte));
4191 break;
4192 }
4193 }
4194 }
4195
4196 if (!spte)
4197 goto out;
4198
4199 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4200 spin_lock(ptl);
4201 if (pud_none(*pud)) {
4202 pud_populate(mm, pud,
4203 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4204 } else {
4205 put_page(virt_to_page(spte));
4206 mm_inc_nr_pmds(mm);
4207 }
4208 spin_unlock(ptl);
4209 out:
4210 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4211 i_mmap_unlock_write(mapping);
4212 return pte;
4213 }
4214
4215 /*
4216 * unmap huge page backed by shared pte.
4217 *
4218 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
4219 * indicated by page_count > 1, unmap is achieved by clearing pud and
4220 * decrementing the ref count. If count == 1, the pte page is not shared.
4221 *
4222 * called with page table lock held.
4223 *
4224 * returns: 1 successfully unmapped a shared pte page
4225 * 0 the underlying pte page is not shared, or it is the last user
4226 */
4227 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4228 {
4229 pgd_t *pgd = pgd_offset(mm, *addr);
4230 pud_t *pud = pud_offset(pgd, *addr);
4231
4232 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4233 if (page_count(virt_to_page(ptep)) == 1)
4234 return 0;
4235
4236 pud_clear(pud);
4237 put_page(virt_to_page(ptep));
4238 mm_dec_nr_pmds(mm);
4239 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4240 return 1;
4241 }
4242 #define want_pmd_share() (1)
4243 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4244 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4245 {
4246 return NULL;
4247 }
4248
4249 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4250 {
4251 return 0;
4252 }
4253 #define want_pmd_share() (0)
4254 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4255
4256 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
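/*
 * Generic huge pte allocation: a PUD_SIZE page is mapped by the pud entry
 * itself, while a PMD_SIZE page uses a pmd that is either shared via
 * huge_pmd_share() or allocated here.
 */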
4257 pte_t *huge_pte_alloc(struct mm_struct *mm,
4258 unsigned long addr, unsigned long sz)
4259 {
4260 pgd_t *pgd;
4261 pud_t *pud;
4262 pte_t *pte = NULL;
4263
4264 pgd = pgd_offset(mm, addr);
4265 pud = pud_alloc(mm, pgd, addr);
4266 if (pud) {
4267 if (sz == PUD_SIZE) {
4268 pte = (pte_t *)pud;
4269 } else {
4270 BUG_ON(sz != PMD_SIZE);
4271 if (want_pmd_share() && pud_none(*pud))
4272 pte = huge_pmd_share(mm, addr, pud);
4273 else
4274 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4275 }
4276 }
4277 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4278
4279 return pte;
4280 }
4281
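/*
 * Generic huge pte lookup without allocation: returns a pointer to the
 * pud- or pmd-level entry covering 'addr', or NULL if the upper levels
 * are not populated.
 */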
4282 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4283 {
4284 pgd_t *pgd;
4285 pud_t *pud;
4286 pmd_t *pmd = NULL;
4287
4288 pgd = pgd_offset(mm, addr);
4289 if (pgd_present(*pgd)) {
4290 pud = pud_offset(pgd, addr);
4291 if (pud_present(*pud)) {
4292 if (pud_huge(*pud))
4293 return (pte_t *)pud;
4294 pmd = pmd_offset(pud, addr);
4295 }
4296 }
4297 return (pte_t *) pmd;
4298 }
4299
4300 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4301
4302 /*
4303 * These functions can be overridden (they are declared __weak) if your
4304 * architecture needs its own behavior.
4305 */
4306 struct page * __weak
4307 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4308 int write)
4309 {
4310 return ERR_PTR(-EINVAL);
4311 }
4312
4313 struct page * __weak
4314 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4315 pmd_t *pmd, int flags)
4316 {
4317 struct page *page = NULL;
4318 spinlock_t *ptl;
4319 retry:
4320 ptl = pmd_lockptr(mm, pmd);
4321 spin_lock(ptl);
4322 /*
4323 * make sure that the address range covered by this pmd is not
4324 * unmapped by other threads.
4325 */
4326 if (!pmd_huge(*pmd))
4327 goto out;
4328 if (pmd_present(*pmd)) {
4329 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4330 if (flags & FOLL_GET)
4331 get_page(page);
4332 } else {
4333 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4334 spin_unlock(ptl);
4335 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4336 goto retry;
4337 }
4338 /*
4339 * hwpoisoned entry is treated as no_page_table in
4340 * follow_page_mask().
4341 */
4342 }
4343 out:
4344 spin_unlock(ptl);
4345 return page;
4346 }
4347
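/*
 * PUD-sized counterpart of follow_huge_pmd(). Taking a reference
 * (FOLL_GET) is not supported here, so that case returns NULL.
 */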
4348 struct page * __weak
4349 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4350 pud_t *pud, int flags)
4351 {
4352 if (flags & FOLL_GET)
4353 return NULL;
4354
4355 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4356 }
4357
4358 #ifdef CONFIG_MEMORY_FAILURE
4359
4360 /*
4361 * This function is called from memory failure code.
4362 * Assume the caller holds page lock of the head page.
4363 */
4364 int dequeue_hwpoisoned_huge_page(struct page *hpage)
4365 {
4366 struct hstate *h = page_hstate(hpage);
4367 int nid = page_to_nid(hpage);
4368 int ret = -EBUSY;
4369
4370 spin_lock(&hugetlb_lock);
4371 /*
4372 * Just checking !page_huge_active is not enough, because that could be
4373 * an isolated/hwpoisoned hugepage (which have >0 refcount).
4374 */
4375 if (!page_huge_active(hpage) && !page_count(hpage)) {
4376 /*
4377 * Hwpoisoned hugepage isn't linked to activelist or freelist,
4378 * but dangling hpage->lru can trigger list-debug warnings
4379 * (this happens when we call unpoison_memory() on it),
4380 * so let it point to itself with list_del_init().
4381 */
4382 list_del_init(&hpage->lru);
4383 set_page_refcounted(hpage);
4384 h->free_huge_pages--;
4385 h->free_huge_pages_node[nid]--;
4386 ret = 0;
4387 }
4388 spin_unlock(&hugetlb_lock);
4389 return ret;
4390 }
4391 #endif
4392
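/*
 * Isolate an active huge page, e.g. for migration: take a reference,
 * clear its active flag and move it from the hstate active list onto
 * 'list'. Returns false if the page is not active or its refcount
 * could not be raised.
 */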
4393 bool isolate_huge_page(struct page *page, struct list_head *list)
4394 {
4395 bool ret = true;
4396
4397 VM_BUG_ON_PAGE(!PageHead(page), page);
4398 spin_lock(&hugetlb_lock);
4399 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4400 ret = false;
4401 goto unlock;
4402 }
4403 clear_page_huge_active(page);
4404 list_move_tail(&page->lru, list);
4405 unlock:
4406 spin_unlock(&hugetlb_lock);
4407 return ret;
4408 }
4409
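/*
 * Undo isolate_huge_page(): mark the page active again, move it back to
 * its hstate's active list and drop the reference taken at isolation.
 */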
4410 void putback_active_hugepage(struct page *page)
4411 {
4412 VM_BUG_ON_PAGE(!PageHead(page), page);
4413 spin_lock(&hugetlb_lock);
4414 set_page_huge_active(page);
4415 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4416 spin_unlock(&hugetlb_lock);
4417 put_page(page);
4418 }