memoryless nodes: fixup uses of node_online_map in generic code
mm/mempolicy.c
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem the kernel's lowmem allocations don't get
 * policied. Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)    /* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)          /* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)           /* Gather statistics */

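/* Slab caches for mempolicy structures and shared-policy rb-tree nodes */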
static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
        .refcnt = ATOMIC_INIT(1), /* never free it */
        .policy = MPOL_DEFAULT,
};

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
        int empty = nodes_empty(*nodes);

        switch (mode) {
        case MPOL_DEFAULT:
                if (!empty)
                        return -EINVAL;
                break;
        case MPOL_BIND:
        case MPOL_INTERLEAVE:
                /* Preferred will only use the first bit, but allow
                   more for now. */
                if (empty)
                        return -EINVAL;
                break;
        }
        return nodes_subset(*nodes, node_states[N_HIGH_MEMORY]) ? 0 : -EINVAL;
}

/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
        struct zonelist *zl;
        int num, max, nd;
        enum zone_type k;

        max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
        max++;                  /* space for zlcache_ptr (see mmzone.h) */
        zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
        if (!zl)
                return ERR_PTR(-ENOMEM);
        zl->zlcache_ptr = NULL;
        num = 0;
        /* First put in the highest zones from all nodes, then all the next
           lower zones etc. Avoid empty zones because the memory allocator
           doesn't like them. If you implement node hot removal you
           have to fix that. */
        k = MAX_NR_ZONES - 1;
        while (1) {
                for_each_node_mask(nd, *nodes) {
                        struct zone *z = &NODE_DATA(nd)->node_zones[k];
                        if (z->present_pages > 0)
                                zl->zones[num++] = z;
                }
                if (k == 0)
                        break;
                k--;
        }
        if (num == 0) {
                kfree(zl);
                return ERR_PTR(-EINVAL);
        }
        zl->zones[num] = NULL;
        return zl;
}

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
        struct mempolicy *policy;

        pr_debug("setting mode %d nodes[0] %lx\n",
                 mode, nodes ? nodes_addr(*nodes)[0] : -1);

        if (mode == MPOL_DEFAULT)
                return NULL;
        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!policy)
                return ERR_PTR(-ENOMEM);
        atomic_set(&policy->refcnt, 1);
        switch (mode) {
        case MPOL_INTERLEAVE:
                policy->v.nodes = *nodes;
                nodes_and(policy->v.nodes, policy->v.nodes,
                          node_states[N_HIGH_MEMORY]);
                if (nodes_weight(policy->v.nodes) == 0) {
                        kmem_cache_free(policy_cache, policy);
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MPOL_PREFERRED:
                policy->v.preferred_node = first_node(*nodes);
                if (policy->v.preferred_node >= MAX_NUMNODES)
                        policy->v.preferred_node = -1;
                break;
        case MPOL_BIND:
                policy->v.zonelist = bind_zonelist(nodes);
                if (IS_ERR(policy->v.zonelist)) {
                        void *error_code = policy->v.zonelist;
                        kmem_cache_free(policy_cache, policy);
                        return error_code;
                }
                break;
        }
        policy->policy = mode;
        policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
        return policy;
}

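/* Forward declarations: both helpers are called from check_pte_range()
   but defined later in this file. */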
static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pte_t *orig_pte;
        pte_t *pte;
        spinlock_t *ptl;

        orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
                struct page *page;
                int nid;

                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, addr, *pte);
                if (!page)
                        continue;
                /*
                 * The check for PageReserved here is important to avoid
                 * handling zero pages and other pages that may have been
                 * marked special by the system.
                 *
                 * If the PageReserved would not be checked here then f.e.
                 * the location of the zero page could have an influence
                 * on MPOL_MF_STRICT, zero pages would be counted for
                 * the per node stats, and there would be useless attempts
                 * to put zero pages on the migration list.
                 */
                if (PageReserved(page))
                        continue;
                nid = page_to_nid(page);
                if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
                        continue;

                if (flags & MPOL_MF_STATS)
                        gather_stats(page, private, pte_dirty(*pte));
                else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                        migrate_page_add(page, private, flags);
                else
                        break;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(orig_pte, ptl);
        return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                if (check_pte_range(vma, pmd, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pmd++, addr = next, addr != end);
        return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                if (check_pmd_range(vma, pud, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pud++, addr = next, addr != end);
        return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pgd_t *pgd;
        unsigned long next;

        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                if (check_pud_range(vma, pgd, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pgd++, addr = next, addr != end);
        return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
                const nodemask_t *nodes, unsigned long flags, void *private)
{
        int err;
        struct vm_area_struct *first, *vma, *prev;

        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

                err = migrate_prep();
                if (err)
                        return ERR_PTR(err);
        }

        first = find_vma(mm, start);
        if (!first)
                return ERR_PTR(-EFAULT);
        prev = NULL;
        for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
                if (!(flags & MPOL_MF_DISCONTIG_OK)) {
                        if (!vma->vm_next && vma->vm_end < end)
                                return ERR_PTR(-EFAULT);
                        if (prev && prev->vm_end < vma->vm_start)
                                return ERR_PTR(-EFAULT);
                }
                if (!is_vm_hugetlb_page(vma) &&
                    ((flags & MPOL_MF_STRICT) ||
                     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
                      vma_migratable(vma)))) {
                        unsigned long endvma = vma->vm_end;

                        if (endvma > end)
                                endvma = end;
                        if (vma->vm_start > start)
                                start = vma->vm_start;
                        err = check_pgd_range(vma, start, endvma, nodes,
                                              flags, private);
                        if (err) {
                                first = ERR_PTR(err);
                                break;
                        }
                }
                prev = vma;
        }
        return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
        int err = 0;
        struct mempolicy *old = vma->vm_policy;

        pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
                 vma->vm_start, vma->vm_end, vma->vm_pgoff,
                 vma->vm_ops, vma->vm_file,
                 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

        if (vma->vm_ops && vma->vm_ops->set_policy)
                err = vma->vm_ops->set_policy(vma, new);
        if (!err) {
                mpol_get(new);
                vma->vm_policy = new;
                mpol_free(old);
        }
        return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end, struct mempolicy *new)
{
        struct vm_area_struct *next;
        int err;

        err = 0;
        for (; vma && vma->vm_start < end; vma = next) {
                next = vma->vm_next;
                if (vma->vm_start < start)
                        err = split_vma(vma->vm_mm, vma, start, 1);
                if (!err && vma->vm_end > end)
                        err = split_vma(vma->vm_mm, vma, end, 0);
                if (!err)
                        err = policy_vma(vma, new);
                if (err)
                        break;
        }
        return err;
}

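/* Reject a user-supplied nodemask that is not allowed by the current
   cpuset, then sanity check it against the requested policy mode. */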
static int contextualize_policy(int mode, nodemask_t *nodes)
{
        if (!nodes)
                return 0;

        cpuset_update_task_memory_state();
        if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
                return -EINVAL;
        return mpol_check_policy(mode, nodes);
}


/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy. Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
        if (p->mempolicy)
                p->flags |= PF_MEMPOLICY;
        else
                p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
        mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
long do_set_mempolicy(int mode, nodemask_t *nodes)
{
        struct mempolicy *new;

        if (contextualize_policy(mode, nodes))
                return -EINVAL;
        new = mpol_new(mode, nodes);
        if (IS_ERR(new))
                return PTR_ERR(new);
        mpol_free(current->mempolicy);
        current->mempolicy = new;
        mpol_set_task_struct_flag();
        if (new && new->policy == MPOL_INTERLEAVE)
                current->il_next = first_node(new->v.nodes);
        return 0;
}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
        int i;

        nodes_clear(*nodes);
        switch (p->policy) {
        case MPOL_BIND:
                for (i = 0; p->v.zonelist->zones[i]; i++)
                        node_set(zone_to_nid(p->v.zonelist->zones[i]),
                                 *nodes);
                break;
        case MPOL_DEFAULT:
                break;
        case MPOL_INTERLEAVE:
                *nodes = p->v.nodes;
                break;
        case MPOL_PREFERRED:
                /* or use current node instead of memory_map? */
                if (p->v.preferred_node < 0)
                        *nodes = node_states[N_HIGH_MEMORY];
                else
                        node_set(p->v.preferred_node, *nodes);
                break;
        default:
                BUG();
        }
}

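/* Return the node id of the page backing addr in mm, faulting the
   page in if necessary. */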
static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
        struct page *p;
        int err;

        err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
        if (err >= 0) {
                err = page_to_nid(p);
                put_page(p);
        }
        return err;
}

/* Retrieve NUMA policy */
long do_get_mempolicy(int *policy, nodemask_t *nmask,
                      unsigned long addr, unsigned long flags)
{
        int err;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = NULL;
        struct mempolicy *pol = current->mempolicy;

        cpuset_update_task_memory_state();
        if (flags &
                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
                return -EINVAL;

        if (flags & MPOL_F_MEMS_ALLOWED) {
                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
                        return -EINVAL;
                *policy = 0;    /* just so it's initialized */
                *nmask = cpuset_current_mems_allowed;
                return 0;
        }

        if (flags & MPOL_F_ADDR) {
                down_read(&mm->mmap_sem);
                vma = find_vma_intersection(mm, addr, addr+1);
                if (!vma) {
                        up_read(&mm->mmap_sem);
                        return -EFAULT;
                }
                if (vma->vm_ops && vma->vm_ops->get_policy)
                        pol = vma->vm_ops->get_policy(vma, addr);
                else
                        pol = vma->vm_policy;
        } else if (addr)
                return -EINVAL;

        if (!pol)
                pol = &default_policy;

        if (flags & MPOL_F_NODE) {
                if (flags & MPOL_F_ADDR) {
                        err = lookup_node(mm, addr);
                        if (err < 0)
                                goto out;
                        *policy = err;
                } else if (pol == current->mempolicy &&
                                pol->policy == MPOL_INTERLEAVE) {
                        *policy = current->il_next;
                } else {
                        err = -EINVAL;
                        goto out;
                }
        } else
                *policy = pol->policy;

        if (vma) {
                up_read(&current->mm->mmap_sem);
                vma = NULL;
        }

        err = 0;
        if (nmask)
                get_zonemask(pol, nmask);

 out:
        if (vma)
                up_read(&current->mm->mmap_sem);
        return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
{
        /*
         * Avoid migrating a page that is shared with others.
         */
        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
                isolate_lru_page(page, pagelist);
}

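/* Allocation callback for migrate_pages(): place the replacement page
   on the given target node. */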
static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
        return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
{
        nodemask_t nmask;
        LIST_HEAD(pagelist);
        int err = 0;

        nodes_clear(nmask);
        node_set(source, nmask);

        check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);

        if (!list_empty(&pagelist))
                err = migrate_pages(&pagelist, new_node_page, dest);

        return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
        const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
        LIST_HEAD(pagelist);
        int busy = 0;
        int err = 0;
        nodemask_t tmp;

        down_read(&mm->mmap_sem);

        err = migrate_vmas(mm, from_nodes, to_nodes, flags);
        if (err)
                goto out;

        /*
         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
         * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
         * bit in 'tmp', and return that <source, dest> pair for migration.
         * The pair of nodemasks 'to' and 'from' define the map.
         *
         * If no pair of bits is found that way, fallback to picking some
         * pair of 'source' and 'dest' bits that are not the same. If the
         * 'source' and 'dest' bits are the same, this represents a node
         * that will be migrating to itself, so no pages need move.
         *
         * If no bits are left in 'tmp', or if all remaining bits left
         * in 'tmp' correspond to the same bit in 'to', return false
         * (nothing left to migrate).
         *
         * This lets us pick a pair of nodes to migrate between, such that
         * if possible the dest node is not already occupied by some other
         * source node, minimizing the risk of overloading the memory on a
         * node that would happen if we migrated incoming memory to a node
         * before migrating outgoing memory sourced from that same node.
         *
         * A single scan of tmp is sufficient. As we go, we remember the
         * most recent <s, d> pair that moved (s != d). If we find a pair
         * that not only moved, but what's better, moved to an empty slot
         * (d is not set in tmp), then we break out then, with that pair.
         * Otherwise when we finish scanning from_tmp, we at least have the
         * most recent <s, d> pair that moved. If we get all the way through
         * the scan of tmp without finding any node that moved, much less
         * moved to an empty node, then there is nothing left worth migrating.
         */

        tmp = *from_nodes;
        while (!nodes_empty(tmp)) {
                int s, d;
                int source = -1;
                int dest = 0;

                for_each_node_mask(s, tmp) {
                        d = node_remap(s, *from_nodes, *to_nodes);
                        if (s == d)
                                continue;

                        source = s;     /* Node moved. Memorize */
                        dest = d;

                        /* dest not in remaining from nodes? */
                        if (!node_isset(dest, tmp))
                                break;
                }
                if (source == -1)
                        break;

                node_clear(source, tmp);
                err = migrate_to_node(mm, source, dest, flags);
                if (err > 0)
                        busy += err;
                if (err < 0)
                        break;
        }
out:
        up_read(&mm->mmap_sem);
        if (err < 0)
                return err;
        return busy;

}

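/* Allocation callback used when mbind() migrates pages: allocate the
   replacement page according to the vma's policy at the page's
   original address. */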
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
        struct vm_area_struct *vma = (struct vm_area_struct *)private;

        return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
                        page_address_in_vma(page, vma));
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
        const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
        return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
        return NULL;
}
#endif

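/* Implementation of the mbind() system call: apply a new memory policy
   to the given address range, optionally migrating pages that do not
   conform to it. */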
long do_mbind(unsigned long start, unsigned long len,
                unsigned long mode, nodemask_t *nmask, unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        struct mempolicy *new;
        unsigned long end;
        int err;
        LIST_HEAD(pagelist);

        if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
                                      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
            || mode > MPOL_MAX)
                return -EINVAL;
        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        if (start & ~PAGE_MASK)
                return -EINVAL;

        if (mode == MPOL_DEFAULT)
                flags &= ~MPOL_MF_STRICT;

        len = (len + PAGE_SIZE - 1) & PAGE_MASK;
        end = start + len;

        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;

        if (mpol_check_policy(mode, nmask))
                return -EINVAL;

        new = mpol_new(mode, nmask);
        if (IS_ERR(new))
                return PTR_ERR(new);

        /*
         * If we are using the default policy then operation
         * on discontinuous address spaces is okay after all
         */
        if (!new)
                flags |= MPOL_MF_DISCONTIG_OK;

        pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n", start, start + len,
                 mode, nmask ? nodes_addr(*nmask)[0] : -1);

        down_write(&mm->mmap_sem);
        vma = check_range(mm, start, end, nmask,
                          flags | MPOL_MF_INVERT, &pagelist);

        err = PTR_ERR(vma);
        if (!IS_ERR(vma)) {
                int nr_failed = 0;

                err = mbind_range(vma, start, end, new);

                if (!list_empty(&pagelist))
                        nr_failed = migrate_pages(&pagelist, new_vma_page,
                                                  (unsigned long)vma);

                if (!err && nr_failed && (flags & MPOL_MF_STRICT))
                        err = -EIO;
        }

        up_write(&mm->mmap_sem);
        mpol_free(new);
        return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
                     unsigned long maxnode)
{
        unsigned long k;
        unsigned long nlongs;
        unsigned long endmask;

        --maxnode;
        nodes_clear(*nodes);
        if (maxnode == 0 || !nmask)
                return 0;
        if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
                return -EINVAL;

        nlongs = BITS_TO_LONGS(maxnode);
        if ((maxnode % BITS_PER_LONG) == 0)
                endmask = ~0UL;
        else
                endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

        /* When the user specified more nodes than supported just check
           if the non supported part is all zero. */
        if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
                if (nlongs > PAGE_SIZE/sizeof(long))
                        return -EINVAL;
                for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
                        unsigned long t;
                        if (get_user(t, nmask + k))
                                return -EFAULT;
                        if (k == nlongs - 1) {
                                if (t & endmask)
                                        return -EINVAL;
                        } else if (t)
                                return -EINVAL;
                }
                nlongs = BITS_TO_LONGS(MAX_NUMNODES);
                endmask = ~0UL;
        }

        if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
                return -EFAULT;
        nodes_addr(*nodes)[nlongs-1] &= endmask;
        return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
                              nodemask_t *nodes)
{
        unsigned long copy = ALIGN(maxnode-1, 64) / 8;
        const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

        if (copy > nbytes) {
                if (copy > PAGE_SIZE)
                        return -EINVAL;
                if (clear_user((char __user *)mask + nbytes, copy - nbytes))
                        return -EFAULT;
                copy = nbytes;
        }
        return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
                          unsigned long mode,
                          unsigned long __user *nmask, unsigned long maxnode,
                          unsigned flags)
{
        nodemask_t nodes;
        int err;

        err = get_nodes(&nodes, nmask, maxnode);
        if (err)
                return err;
#ifdef CONFIG_CPUSETS
        /* Restrict the nodes to the allowed nodes in the cpuset */
        nodes_and(nodes, nodes, current->mems_allowed);
#endif
        return do_mbind(start, len, mode, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
                                  unsigned long maxnode)
{
        int err;
        nodemask_t nodes;

        if (mode < 0 || mode > MPOL_MAX)
                return -EINVAL;
        err = get_nodes(&nodes, nmask, maxnode);
        if (err)
                return err;
        return do_set_mempolicy(mode, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
                const unsigned long __user *old_nodes,
                const unsigned long __user *new_nodes)
{
        struct mm_struct *mm;
        struct task_struct *task;
        nodemask_t old;
        nodemask_t new;
        nodemask_t task_nodes;
        int err;

        err = get_nodes(&old, old_nodes, maxnode);
        if (err)
                return err;

        err = get_nodes(&new, new_nodes, maxnode);
        if (err)
                return err;

        /* Find the mm_struct */
        read_lock(&tasklist_lock);
        task = pid ? find_task_by_pid(pid) : current;
        if (!task) {
                read_unlock(&tasklist_lock);
                return -ESRCH;
        }
        mm = get_task_mm(task);
        read_unlock(&tasklist_lock);

        if (!mm)
                return -EINVAL;

        /*
         * Check if this process has the right to modify the specified
         * process. The right exists if the process has administrative
         * capabilities, superuser privileges or the same
         * userid as the target process.
         */
        if ((current->euid != task->suid) && (current->euid != task->uid) &&
            (current->uid != task->suid) && (current->uid != task->uid) &&
            !capable(CAP_SYS_NICE)) {
                err = -EPERM;
                goto out;
        }

        task_nodes = cpuset_mems_allowed(task);
        /* Is the user allowed to access the target nodes? */
        if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
                err = -EPERM;
                goto out;
        }

        if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
                err = -EINVAL;
                goto out;
        }

        err = security_task_movememory(task);
        if (err)
                goto out;

        err = do_migrate_pages(mm, &old, &new,
                capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
        mmput(mm);
        return err;
}


/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
                                  unsigned long __user *nmask,
                                  unsigned long maxnode,
                                  unsigned long addr, unsigned long flags)
{
        int err, pval;
        nodemask_t nodes;

        if (nmask != NULL && maxnode < MAX_NUMNODES)
                return -EINVAL;

        err = do_get_mempolicy(&pval, &nodes, addr, flags);

        if (err)
                return err;

        if (policy && put_user(pval, policy))
                return -EFAULT;

        if (nmask)
                err = copy_nodes_to_user(nmask, maxnode, &nodes);

        return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
                                     compat_ulong_t __user *nmask,
                                     compat_ulong_t maxnode,
                                     compat_ulong_t addr, compat_ulong_t flags)
{
        long err;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        DECLARE_BITMAP(bm, MAX_NUMNODES);

        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

        if (nmask)
                nm = compat_alloc_user_space(alloc_size);

        err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

        if (!err && nmask) {
                err = copy_from_user(bm, nm, alloc_size);
                /* ensure entire bitmap is zeroed */
                err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
                err |= compat_put_bitmap(nmask, bm, nr_bits);
        }

        return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
                                     compat_ulong_t maxnode)
{
        long err = 0;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        DECLARE_BITMAP(bm, MAX_NUMNODES);

        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

        if (nmask) {
                err = compat_get_bitmap(bm, nmask, nr_bits);
                nm = compat_alloc_user_space(alloc_size);
                err |= copy_to_user(nm, bm, alloc_size);
        }

        if (err)
                return -EFAULT;

        return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
                             compat_ulong_t mode, compat_ulong_t __user *nmask,
                             compat_ulong_t maxnode, compat_ulong_t flags)
{
        long err = 0;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        nodemask_t bm;

        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

        if (nmask) {
                err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
                nm = compat_alloc_user_space(alloc_size);
                err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
        }

        if (err)
                return -EFAULT;

        return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma  - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Returned policy has extra reference count if shared, vma,
 * or some other task's policy [show_numa_maps() can pass
 * @task != current]. It is the caller's responsibility to
 * free the reference in these cases.
 */
static struct mempolicy *get_vma_policy(struct task_struct *task,
                struct vm_area_struct *vma, unsigned long addr)
{
        struct mempolicy *pol = task->mempolicy;
        int shared_pol = 0;

        if (vma) {
                if (vma->vm_ops && vma->vm_ops->get_policy) {
                        pol = vma->vm_ops->get_policy(vma, addr);
                        shared_pol = 1; /* if pol non-NULL, add ref below */
                } else if (vma->vm_policy &&
                                vma->vm_policy->policy != MPOL_DEFAULT)
                        pol = vma->vm_policy;
        }
        if (!pol)
                pol = &default_policy;
        else if (!shared_pol && pol != current->mempolicy)
                mpol_get(pol);  /* vma or other task's policy */
        return pol;
}

/* Return a zonelist representing a mempolicy */
static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
        int nd;

        switch (policy->policy) {
        case MPOL_PREFERRED:
                nd = policy->v.preferred_node;
                if (nd < 0)
                        nd = numa_node_id();
                break;
        case MPOL_BIND:
                /* Lower zones don't get a policy applied */
                /* Careful: current->mems_allowed might have moved */
                if (gfp_zone(gfp) >= policy_zone)
                        if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
                                return policy->v.zonelist;
                /*FALL THROUGH*/
        case MPOL_INTERLEAVE: /* should not happen */
        case MPOL_DEFAULT:
                nd = numa_node_id();
                break;
        default:
                nd = 0;
                BUG();
        }
        return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
        unsigned nid, next;
        struct task_struct *me = current;

        nid = me->il_next;
        next = next_node(nid, policy->v.nodes);
        if (next >= MAX_NUMNODES)
                next = first_node(policy->v.nodes);
        me->il_next = next;
        return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned slab_node(struct mempolicy *policy)
{
        int pol = policy ? policy->policy : MPOL_DEFAULT;

        switch (pol) {
        case MPOL_INTERLEAVE:
                return interleave_nodes(policy);

        case MPOL_BIND:
                /*
                 * Follow bind policy behavior and start allocation at the
                 * first node.
                 */
                return zone_to_nid(policy->v.zonelist->zones[0]);

        case MPOL_PREFERRED:
                if (policy->v.preferred_node >= 0)
                        return policy->v.preferred_node;
                /* Fall through */

        default:
                return numa_node_id();
        }
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
                struct vm_area_struct *vma, unsigned long off)
{
        unsigned nnodes = nodes_weight(pol->v.nodes);
        unsigned target = (unsigned)off % nnodes;
        int c;
        int nid = -1;

        c = 0;
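        /* Walk the set bits of the interleave mask until the target'th
           one is reached. */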
        do {
                nid = next_node(nid, pol->v.nodes);
                c++;
        } while (c <= target);
        return nid;
}

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
                 struct vm_area_struct *vma, unsigned long addr, int shift)
{
        if (vma) {
                unsigned long off;

                /*
                 * for small pages, there is no difference between
                 * shift and PAGE_SHIFT, so the bit-shift is safe.
                 * for huge pages, since vm_pgoff is in units of small
                 * pages, we need to shift off the always 0 bits to get
                 * a useful offset.
                 */
                BUG_ON(shift < PAGE_SHIFT);
                off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
                off += (addr - vma->vm_start) >> shift;
                return offset_il_node(pol, vma, off);
        } else
                return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted 'BIND policy
 *
 * Returns a zonelist suitable for a huge page allocation.
 * If the effective policy is 'BIND, returns pointer to policy's zonelist.
 * If it is also a policy for which get_vma_policy() returns an extra
 * reference, we must hold that reference until after allocation.
 * In that case, return policy via @mpol so hugetlb allocation can drop
 * the reference. For non-'BIND referenced policies, we can/do drop the
 * reference here, so the caller doesn't need to know about the special case
 * for default and current task policy.
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
                                gfp_t gfp_flags, struct mempolicy **mpol)
{
        struct mempolicy *pol = get_vma_policy(current, vma, addr);
        struct zonelist *zl;

        *mpol = NULL;           /* probably no unref needed */
        if (pol->policy == MPOL_INTERLEAVE) {
                unsigned nid;

                nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
                __mpol_free(pol);               /* finished with pol */
                return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
        }

        zl = zonelist_policy(GFP_HIGHUSER, pol);
        if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
                if (pol->policy != MPOL_BIND)
                        __mpol_free(pol);       /* finished with pol */
                else
                        *mpol = pol;    /* unref needed after allocation */
        }
        return zl;
}
#endif

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
                                          unsigned nid)
{
        struct zonelist *zl;
        struct page *page;

        zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
        page = __alloc_pages(gfp, order, zl);
        if (page && page_zone(page) == zl->zones[0])
                inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
        return page;
}

/**
 * alloc_page_vma - Allocate a page for a VMA.
 *
 * @gfp:
 * %GFP_USER    user allocation.
 * %GFP_KERNEL  kernel allocations,
 * %GFP_HIGHMEM highmem/user allocations,
 * %GFP_FS      allocation should not call back into a file system.
 * %GFP_ATOMIC  don't sleep.
 *
 * @vma:  Pointer to VMA or NULL if not available.
 * @addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
 * mm_struct of the VMA to prevent it from going away. Should be used for
 * all allocations for pages that will be mapped into
 * user space. Returns NULL when no page can be allocated.
 *
 * Should be called with the mmap_sem of the vma held.
 */
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
        struct mempolicy *pol = get_vma_policy(current, vma, addr);
        struct zonelist *zl;

        cpuset_update_task_memory_state();

        if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
                unsigned nid;

                nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
                return alloc_page_interleave(gfp, 0, nid);
        }
        zl = zonelist_policy(gfp, pol);
        if (pol != &default_policy && pol != current->mempolicy) {
                /*
                 * slow path: ref counted policy -- shared or vma
                 */
                struct page *page = __alloc_pages(gfp, 0, zl);
                __mpol_free(pol);
                return page;
        }
        /*
         * fast path: default or task policy
         */
        return __alloc_pages(gfp, 0, zl);
}

/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 * %GFP_USER    user allocation,
 * %GFP_KERNEL  kernel allocation,
 * %GFP_HIGHMEM highmem allocation,
 * %GFP_FS      don't call back into a file system.
 * %GFP_ATOMIC  don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool. When not in
 * interrupt context, the current process' NUMA policy is applied.
 * Returns NULL when no page can be allocated.
 *
 * Don't call cpuset_update_task_memory_state() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
        struct mempolicy *pol = current->mempolicy;

        if ((gfp & __GFP_WAIT) && !in_interrupt())
                cpuset_update_task_memory_state();
        if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
                pol = &default_policy;
        if (pol->policy == MPOL_INTERLEAVE)
                return alloc_page_interleave(gfp, order, interleave_nodes(pol));
        return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);

/*
 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed(). This
 * keeps mempolicies cpuset relative after its cpuset moves. See
 * further kernel/cpuset.c update_nodemask().
 */
void *cpuset_being_rebound;

/* Slow path of a mempolicy copy */
struct mempolicy *__mpol_copy(struct mempolicy *old)
{
        struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

        if (!new)
                return ERR_PTR(-ENOMEM);
        if (current_cpuset_is_being_rebound()) {
                nodemask_t mems = cpuset_mems_allowed(current);
                mpol_rebind_policy(old, &mems);
        }
        *new = *old;
        atomic_set(&new->refcnt, 1);
        if (new->policy == MPOL_BIND) {
                int sz = ksize(old->v.zonelist);
                new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
                if (!new->v.zonelist) {
                        kmem_cache_free(policy_cache, new);
                        return ERR_PTR(-ENOMEM);
                }
        }
        return new;
}

/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (!a || !b)
                return 0;
        if (a->policy != b->policy)
                return 0;
        switch (a->policy) {
        case MPOL_DEFAULT:
                return 1;
        case MPOL_INTERLEAVE:
                return nodes_equal(a->v.nodes, b->v.nodes);
        case MPOL_PREFERRED:
                return a->v.preferred_node == b->v.preferred_node;
        case MPOL_BIND: {
                int i;
                for (i = 0; a->v.zonelist->zones[i]; i++)
                        if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
                                return 0;
                return b->v.zonelist->zones[i] == NULL;
        }
        default:
                BUG();
                return 0;
        }
}

/* Slow path of a mpol destructor. */
void __mpol_free(struct mempolicy *p)
{
        if (!atomic_dec_and_test(&p->refcnt))
                return;
        if (p->policy == MPOL_BIND)
                kfree(p->v.zonelist);
        p->policy = MPOL_DEFAULT;
        kmem_cache_free(policy_cache, p);
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in a Red-Black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
        struct rb_node *n = sp->root.rb_node;

        while (n) {
                struct sp_node *p = rb_entry(n, struct sp_node, nd);

                if (start >= p->end)
                        n = n->rb_right;
                else if (end <= p->start)
                        n = n->rb_left;
                else
                        break;
        }
        if (!n)
                return NULL;
        for (;;) {
                struct sp_node *w = NULL;
                struct rb_node *prev = rb_prev(n);
                if (!prev)
                        break;
                w = rb_entry(prev, struct sp_node, nd);
                if (w->end <= start)
                        break;
                n = prev;
        }
        return rb_entry(n, struct sp_node, nd);
}

/* Insert a new shared policy into the list. */
/* Caller holds sp->lock */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
        struct rb_node **p = &sp->root.rb_node;
        struct rb_node *parent = NULL;
        struct sp_node *nd;

        while (*p) {
                parent = *p;
                nd = rb_entry(parent, struct sp_node, nd);
                if (new->start < nd->start)
                        p = &(*p)->rb_left;
                else if (new->end > nd->end)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new->nd, parent, p);
        rb_insert_color(&new->nd, &sp->root);
        pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
                 new->policy ? new->policy->policy : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        struct mempolicy *pol = NULL;
        struct sp_node *sn;

        if (!sp->root.rb_node)
                return NULL;
        spin_lock(&sp->lock);
        sn = sp_lookup(sp, idx, idx+1);
        if (sn) {
                mpol_get(sn->policy);
                pol = sn->policy;
        }
        spin_unlock(&sp->lock);
        return pol;
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
        pr_debug("deleting %lx-%lx\n", n->start, n->end);
        rb_erase(&n->nd, &sp->root);
        mpol_free(n->policy);
        kmem_cache_free(sn_cache, n);
}

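/* Allocate a shared policy node covering [start, end) and take a
   reference on pol. */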
struct sp_node *
sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
{
        struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

        if (!n)
                return NULL;
        n->start = start;
        n->end = end;
        mpol_get(pol);
        n->policy = pol;
        return n;
}

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
                                 unsigned long end, struct sp_node *new)
{
        struct sp_node *n, *new2 = NULL;

restart:
        spin_lock(&sp->lock);
        n = sp_lookup(sp, start, end);
        /* Take care of old policies in the same range. */
        while (n && n->start < end) {
                struct rb_node *next = rb_next(&n->nd);
                if (n->start >= start) {
                        if (n->end <= end)
                                sp_delete(sp, n);
                        else
                                n->start = end;
                } else {
                        /* Old policy spanning whole new range. */
                        if (n->end > end) {
                                if (!new2) {
                                        spin_unlock(&sp->lock);
                                        new2 = sp_alloc(end, n->end, n->policy);
                                        if (!new2)
                                                return -ENOMEM;
                                        goto restart;
                                }
                                n->end = start;
                                sp_insert(sp, new2);
                                new2 = NULL;
                                break;
                        } else
                                n->end = start;
                }
                if (!next)
                        break;
                n = rb_entry(next, struct sp_node, nd);
        }
        if (new)
                sp_insert(sp, new);
        spin_unlock(&sp->lock);
        if (new2) {
                mpol_free(new2->policy);
                kmem_cache_free(sn_cache, new2);
        }
        return 0;
}

void mpol_shared_policy_init(struct shared_policy *info, int policy,
                        nodemask_t *policy_nodes)
{
        info->root = RB_ROOT;
        spin_lock_init(&info->lock);

        if (policy != MPOL_DEFAULT) {
                struct mempolicy *newpol;

                /* Falls back to MPOL_DEFAULT on any error */
                newpol = mpol_new(policy, policy_nodes);
                if (!IS_ERR(newpol)) {
                        /* Create pseudo-vma that contains just the policy */
                        struct vm_area_struct pvma;

                        memset(&pvma, 0, sizeof(struct vm_area_struct));
                        /* Policy covers entire file */
                        pvma.vm_end = TASK_SIZE;
                        mpol_set_shared_policy(info, &pvma, newpol);
                        mpol_free(newpol);
                }
        }
}

int mpol_set_shared_policy(struct shared_policy *info,
                        struct vm_area_struct *vma, struct mempolicy *npol)
{
        int err;
        struct sp_node *new = NULL;
        unsigned long sz = vma_pages(vma);

        pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
                 vma->vm_pgoff,
                 sz, npol ? npol->policy : -1,
                 npol ? nodes_addr(npol->v.nodes)[0] : -1);

        if (npol) {
                new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
                if (!new)
                        return -ENOMEM;
        }
        err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
        if (err && new)
                kmem_cache_free(sn_cache, new);
        return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
        struct sp_node *n;
        struct rb_node *next;

        if (!p->root.rb_node)
                return;
        spin_lock(&p->lock);
        next = rb_first(&p->root);
        while (next) {
                n = rb_entry(next, struct sp_node, nd);
                next = rb_next(&n->nd);
                rb_erase(&n->nd, &p->root);
                mpol_free(n->policy);
                kmem_cache_free(sn_cache, n);
        }
        spin_unlock(&p->lock);
}

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
        nodemask_t interleave_nodes;
        unsigned long largest = 0;
        int nid, prefer = 0;

        policy_cache = kmem_cache_create("numa_policy",
                                         sizeof(struct mempolicy),
                                         0, SLAB_PANIC, NULL);

        sn_cache = kmem_cache_create("shared_policy_node",
                                     sizeof(struct sp_node),
                                     0, SLAB_PANIC, NULL);

        /*
         * Set interleaving policy for system init. Interleaving is only
         * enabled across suitably sized nodes (default is >= 16MB), or
         * fall back to the largest node if they're all smaller.
         */
        nodes_clear(interleave_nodes);
        for_each_node_state(nid, N_HIGH_MEMORY) {
                unsigned long total_pages = node_present_pages(nid);

                /* Preserve the largest node */
                if (largest < total_pages) {
                        largest = total_pages;
                        prefer = nid;
                }

                /* Interleave this node? */
                if ((total_pages << PAGE_SHIFT) >= (16 << 20))
                        node_set(nid, interleave_nodes);
        }

        /* All too small, use the largest */
        if (unlikely(nodes_empty(interleave_nodes)))
                node_set(prefer, interleave_nodes);

        if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
                printk("numa_policy_init: interleaving failed\n");
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
        do_set_mempolicy(MPOL_DEFAULT, NULL);
}

/* Migrate a policy to a different set of nodes */
void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
        nodemask_t *mpolmask;
        nodemask_t tmp;

        if (!pol)
                return;
        mpolmask = &pol->cpuset_mems_allowed;
        if (nodes_equal(*mpolmask, *newmask))
                return;

        switch (pol->policy) {
        case MPOL_DEFAULT:
                break;
        case MPOL_INTERLEAVE:
                nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
                pol->v.nodes = tmp;
                *mpolmask = *newmask;
                current->il_next = node_remap(current->il_next,
                                                *mpolmask, *newmask);
                break;
        case MPOL_PREFERRED:
                pol->v.preferred_node = node_remap(pol->v.preferred_node,
                                                *mpolmask, *newmask);
                *mpolmask = *newmask;
                break;
        case MPOL_BIND: {
                nodemask_t nodes;
                struct zone **z;
                struct zonelist *zonelist;

                nodes_clear(nodes);
                for (z = pol->v.zonelist->zones; *z; z++)
                        node_set(zone_to_nid(*z), nodes);
                nodes_remap(tmp, nodes, *mpolmask, *newmask);
                nodes = tmp;

                zonelist = bind_zonelist(&nodes);

                /* If no mem, bind_zonelist() returns an error and we keep
                 * the old zonelist. If that old zonelist has no remaining
                 * mems_allowed nodes, then zonelist_policy() will
                 * "FALL THROUGH" to MPOL_DEFAULT.
                 */

                if (!IS_ERR(zonelist)) {
                        /* Good - got mem - substitute new zonelist */
                        kfree(pol->v.zonelist);
                        pol->v.zonelist = zonelist;
                }
                *mpolmask = *newmask;
                break;
        }
        default:
                BUG();
                break;
        }
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
        mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                mpol_rebind_policy(vma->vm_policy, new);
        up_write(&mm->mmap_sem);
}

/*
 * Display pages allocated per node and memory policy via /proc.
 */

static const char * const policy_types[] =
        { "default", "prefer", "bind", "interleave" };

/*
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative)
 */
static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
        char *p = buffer;
        int l;
        nodemask_t nodes;
        int mode = pol ? pol->policy : MPOL_DEFAULT;

        switch (mode) {
        case MPOL_DEFAULT:
                nodes_clear(nodes);
                break;

        case MPOL_PREFERRED:
                nodes_clear(nodes);
                node_set(pol->v.preferred_node, nodes);
                break;

        case MPOL_BIND:
                get_zonemask(pol, &nodes);
                break;

        case MPOL_INTERLEAVE:
                nodes = pol->v.nodes;
                break;

        default:
                BUG();
                return -EFAULT;
        }

        l = strlen(policy_types[mode]);
        if (buffer + maxlen < p + l + 1)
                return -ENOSPC;

        strcpy(p, policy_types[mode]);
        p += l;

        if (!nodes_empty(nodes)) {
                if (buffer + maxlen < p + 2)
                        return -ENOSPC;
                *p++ = '=';
                p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
        }
        return p - buffer;
}

struct numa_maps {
        unsigned long pages;
        unsigned long anon;
        unsigned long active;
        unsigned long writeback;
        unsigned long mapcount_max;
        unsigned long dirty;
        unsigned long swapcache;
        unsigned long node[MAX_NUMNODES];
};

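/* Accumulate per-page statistics into the numa_maps passed via private;
   called for each page found by check_pte_range() with MPOL_MF_STATS. */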
static void gather_stats(struct page *page, void *private, int pte_dirty)
{
        struct numa_maps *md = private;
        int count = page_mapcount(page);

        md->pages++;
        if (pte_dirty || PageDirty(page))
                md->dirty++;

        if (PageSwapCache(page))
                md->swapcache++;

        if (PageActive(page))
                md->active++;

        if (PageWriteback(page))
                md->writeback++;

        if (PageAnon(page))
                md->anon++;

        if (count > md->mapcount_max)
                md->mapcount_max = count;

        md->node[page_to_nid(page)]++;
}

#ifdef CONFIG_HUGETLB_PAGE
static void check_huge_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end,
                struct numa_maps *md)
{
        unsigned long addr;
        struct page *page;

        for (addr = start; addr < end; addr += HPAGE_SIZE) {
                pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
                pte_t pte;

                if (!ptep)
                        continue;

                pte = *ptep;
                if (pte_none(pte))
                        continue;

                page = pte_page(pte);
                if (!page)
                        continue;

                gather_stats(page, md, pte_dirty(*ptep));
        }
}
#else
static inline void check_huge_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end,
                struct numa_maps *md)
{
}
#endif

int show_numa_map(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;
        struct numa_maps *md;
        struct file *file = vma->vm_file;
        struct mm_struct *mm = vma->vm_mm;
        struct mempolicy *pol;
        int n;
        char buffer[50];

        if (!mm)
                return 0;

        md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
        if (!md)
                return 0;

        pol = get_vma_policy(priv->task, vma, vma->vm_start);
        mpol_to_str(buffer, sizeof(buffer), pol);
        /*
         * unref shared or other task's mempolicy
         */
        if (pol != &default_policy && pol != current->mempolicy)
                __mpol_free(pol);

        seq_printf(m, "%08lx %s", vma->vm_start, buffer);

        if (file) {
                seq_printf(m, " file=");
                seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
        } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
                seq_printf(m, " heap");
        } else if (vma->vm_start <= mm->start_stack &&
                        vma->vm_end >= mm->start_stack) {
                seq_printf(m, " stack");
        }

        if (is_vm_hugetlb_page(vma)) {
                check_huge_range(vma, vma->vm_start, vma->vm_end, md);
                seq_printf(m, " huge");
        } else {
                check_pgd_range(vma, vma->vm_start, vma->vm_end,
                        &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
        }

        if (!md->pages)
                goto out;

        if (md->anon)
                seq_printf(m, " anon=%lu", md->anon);

        if (md->dirty)
                seq_printf(m, " dirty=%lu", md->dirty);

        if (md->pages != md->anon && md->pages != md->dirty)
                seq_printf(m, " mapped=%lu", md->pages);

        if (md->mapcount_max > 1)
                seq_printf(m, " mapmax=%lu", md->mapcount_max);

        if (md->swapcache)
                seq_printf(m, " swapcache=%lu", md->swapcache);

        if (md->active < md->pages && !is_vm_hugetlb_page(vma))
                seq_printf(m, " active=%lu", md->active);

        if (md->writeback)
                seq_printf(m, " writeback=%lu", md->writeback);

        for_each_node_state(n, N_HIGH_MEMORY)
                if (md->node[n])
                        seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
        seq_putc(m, '\n');
        kfree(md);

        if (m->count < m->size)
                m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
        return 0;
}