fs/proc/task_mmu.c
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
        unsigned long data, text, lib, swap;
        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

        /*
         * Note: to minimize their overhead, mm maintains hiwater_vm and
         * hiwater_rss only when about to *lower* total_vm or rss.  Any
         * collector of these hiwater stats must therefore get total_vm
         * and rss too, which will usually be the higher.  Barriers? Not
         * worth the effort; such snapshots can always be inconsistent.
         */
        hiwater_vm = total_vm = mm->total_vm;
        if (hiwater_vm < mm->hiwater_vm)
                hiwater_vm = mm->hiwater_vm;
        hiwater_rss = total_rss = get_mm_rss(mm);
        if (hiwater_rss < mm->hiwater_rss)
                hiwater_rss = mm->hiwater_rss;

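        /*
         * The mm counters used below are kept in units of pages;
         * shifting left by (PAGE_SHIFT - 10) converts pages to
         * kilobytes (e.g. with 4K pages PAGE_SHIFT is 12, so each
         * page contributes 1 << 2 = 4 kB).
         */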
        data = mm->total_vm - mm->shared_vm - mm->stack_vm;
        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        swap = get_mm_counter(mm, MM_SWAPENTS);
        seq_printf(m,
                "VmPeak:\t%8lu kB\n"
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
                "VmHWM:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n"
                "VmSwap:\t%8lu kB\n",
                hiwater_vm << (PAGE_SHIFT-10),
                (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                hiwater_rss << (PAGE_SHIFT-10),
                total_rss << (PAGE_SHIFT-10),
                data << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
                swap << (PAGE_SHIFT-10));
}

unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}

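/*
 * Reported through /proc/<pid>/statm: all values are page counts.
 * "shared" here means file-backed pages, "resident" is file-backed
 * plus anonymous pages, and the return value is the total mapped
 * size in pages.
 */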
int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        *shared = get_mm_counter(mm, MM_FILEPAGES);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->total_vm - mm->shared_vm;
        *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
        return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
        len = 25 + sizeof(void*) * 6 - len;
        if (len < 1)
                len = 1;
        seq_printf(m, "%*c", len, ' ');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
        if (vma && vma != priv->tail_vma) {
                struct mm_struct *mm = vma->vm_mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
}

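/*
 * The seq_file iterator below follows the usual start/next/stop
 * contract: m_start() takes a reference on the task's mm and acquires
 * mmap_sem for reading, m_next() advances along the vma list, and
 * m_stop()/vma_stop() drop the lock and the mm reference.  m->version
 * caches the address of the last vma shown so a later read() can
 * resume via find_vma() instead of rescanning the whole list.
 */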
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *vma, *tail_vma = NULL;
        loff_t l = *pos;

        /* Clear the per syscall fields in priv */
        priv->task = NULL;
        priv->tail_vma = NULL;

        /*
         * We remember last_addr rather than next_addr so that we hit
         * the mmap_cache most of the time.  last_addr is zero at the
         * beginning and after an lseek, and -1 once the end of the
         * vmas has been reached.
         */

        if (last_addr == -1UL)
                return NULL;

        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
        if (!priv->task)
                return NULL;

        mm = mm_for_maps(priv->task);
        if (!mm)
                return NULL;
        down_read(&mm->mmap_sem);

        tail_vma = get_gate_vma(priv->task);
        priv->tail_vma = tail_vma;

        /* Start with last addr hint */
        vma = find_vma(mm, last_addr);
        if (last_addr && vma) {
                vma = vma->vm_next;
                goto out;
        }

        /*
         * Check that the vma index is within range and do a
         * sequential scan until the requested index.
         */
        vma = NULL;
        if ((unsigned long)l < mm->map_count) {
                vma = mm->mmap;
                while (l-- && vma)
                        vma = vma->vm_next;
                goto out;
        }

        if (l != mm->map_count)
                tail_vma = NULL; /* After gate vma */

out:
        if (vma)
                return vma;

        /* End of vmas has been reached */
        m->version = (tail_vma != NULL)? 0: -1UL;
        up_read(&mm->mmap_sem);
        mmput(mm);
        return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;
        struct vm_area_struct *tail_vma = priv->tail_vma;

        (*pos)++;
        if (vma && (vma != tail_vma) && vma->vm_next)
                return vma->vm_next;
        vma_stop(priv, vma);
        return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;

        vma_stop(priv, vma);
        if (priv->task)
                put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
                        const struct seq_operations *ops)
{
        struct proc_maps_private *priv;
        int ret = -ENOMEM;
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv) {
                priv->pid = proc_pid(inode);
                ret = seq_open(file, ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = priv;
                } else {
                        kfree(priv);
                }
        }
        return ret;
}

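/*
 * Format of one /proc/<pid>/maps line, for reference (example values):
 *
 *      08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm
 *
 * i.e. start-end, permissions (r/w/x plus s for shared or p for
 * private), file offset, device major:minor, inode, and the backing
 * file or a [heap]/[stack]/[vdso] marker.
 */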
static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
        int flags = vma->vm_flags;
        unsigned long ino = 0;
        unsigned long long pgoff = 0;
        unsigned long start;
        dev_t dev = 0;
        int len;

        if (file) {
                struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
                pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
        }

        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
        if (vma->vm_flags & VM_GROWSDOWN)
                if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
                        start += PAGE_SIZE;

        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
                        start,
                        vma->vm_end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
                        pgoff,
                        MAJOR(dev), MINOR(dev), ino, &len);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (file) {
                pad_len_spaces(m, len);
                seq_path(m, &file->f_path, "\n");
        } else {
                const char *name = arch_vma_name(vma);
                if (!name) {
                        if (mm) {
                                if (vma->vm_start <= mm->start_brk &&
                                    vma->vm_end >= mm->brk) {
                                        name = "[heap]";
                                } else if (vma->vm_start <= mm->start_stack &&
                                           vma->vm_end >= mm->start_stack) {
                                        name = "[stack]";
                                }
                        } else {
                                name = "[vdso]";
                        }
                }
                if (name) {
                        pad_len_spaces(m, len);
                        seq_puts(m, name);
                }
        }
        seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
        struct vm_area_struct *vma = v;
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;

        show_map_vma(m, vma);

        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
        return 0;
}

static const struct seq_operations proc_pid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
        .open           = maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep accumulated division errors low, pss is a 64-bit fixed-point
 * counter: (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *      - 1M 3-user-pages add up to 8KB errors;
 *      - supports mapcount up to 2^24, or 16M;
 *      - supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
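
/*
 * Worked example, assuming 4K pages: a page with mapcount 3 adds
 * (4096 << 12) / 3 = 5592405 to pss, which is ~1365 bytes (a third of
 * the page) once shifted right by PSS_SHIFT.  Each division truncates
 * less than mapcount units of 1/4096 byte, so the accumulated error
 * stays negligible even across millions of pages.
 */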

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
        struct vm_area_struct *vma;
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
        unsigned long referenced;
        unsigned long anonymous;
        unsigned long swap;
        u64 pss;
};

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                           struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = mss->vma;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;
        int mapcount;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;

                if (is_swap_pte(ptent)) {
                        mss->swap += PAGE_SIZE;
                        continue;
                }

                if (!pte_present(ptent))
                        continue;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                if (PageAnon(page))
                        mss->anonymous += PAGE_SIZE;

                mss->resident += PAGE_SIZE;
                /* Accumulate the size in pages that have been accessed. */
                if (pte_young(ptent) || PageReferenced(page))
                        mss->referenced += PAGE_SIZE;
                mapcount = page_mapcount(page);
                if (mapcount >= 2) {
                        if (pte_dirty(ptent) || PageDirty(page))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                        mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
                } else {
                        if (pte_dirty(ptent) || PageDirty(page))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
                        mss->pss += (PAGE_SIZE << PSS_SHIFT);
                }
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}

static int show_smap(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;
        struct mm_walk smaps_walk = {
                .pmd_entry = smaps_pte_range,
                .mm = vma->vm_mm,
                .private = &mss,
        };

        memset(&mss, 0, sizeof mss);
        mss.vma = vma;
        /* mmap_sem is held in m_start */
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

        show_map_vma(m, vma);

        seq_printf(m,
                   "Size:           %8lu kB\n"
                   "Rss:            %8lu kB\n"
                   "Pss:            %8lu kB\n"
                   "Shared_Clean:   %8lu kB\n"
                   "Shared_Dirty:   %8lu kB\n"
                   "Private_Clean:  %8lu kB\n"
                   "Private_Dirty:  %8lu kB\n"
                   "Referenced:     %8lu kB\n"
                   "Anonymous:      %8lu kB\n"
                   "Swap:           %8lu kB\n"
                   "KernelPageSize: %8lu kB\n"
                   "MMUPageSize:    %8lu kB\n",
                   (vma->vm_end - vma->vm_start) >> 10,
                   mss.resident >> 10,
                   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
                   mss.shared_clean >> 10,
                   mss.shared_dirty >> 10,
                   mss.private_clean >> 10,
                   mss.private_dirty >> 10,
                   mss.referenced >> 10,
                   mss.anonymous >> 10,
                   mss.swap >> 10,
                   vma_kernel_pagesize(vma) >> 10,
                   vma_mmu_pagesize(vma) >> 10);

        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
        return 0;
}

static const struct seq_operations proc_pid_smaps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
        .open           = smaps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->private;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
                if (!pte_present(ptent))
                        continue;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                /* Clear accessed and referenced bits. */
                ptep_test_and_clear_young(vma, addr, pte);
                ClearPageReferenced(page);
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
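
/*
 * Typical usage from userspace (illustrative):
 *
 *      echo 1 > /proc/<pid>/clear_refs   - clear refs on all pages
 *      echo 2 > /proc/<pid>/clear_refs   - anonymous pages only
 *      echo 3 > /proc/<pid>/clear_refs   - file-mapped pages only
 *
 * The Referenced fields of /proc/<pid>/smaps then show which pages
 * the task has touched since the write.
 */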

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct task_struct *task;
        char buffer[PROC_NUMBUF];
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        long type;

        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count))
                return -EFAULT;
        if (strict_strtol(strstrip(buffer), 10, &type))
                return -EINVAL;
        if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
                return -EINVAL;
        task = get_proc_task(file->f_path.dentry->d_inode);
        if (!task)
                return -ESRCH;
        mm = get_task_mm(task);
        if (mm) {
                struct mm_walk clear_refs_walk = {
                        .pmd_entry = clear_refs_pte_range,
                        .mm = mm,
                };
                down_read(&mm->mmap_sem);
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        clear_refs_walk.private = vma;
                        if (is_vm_hugetlb_page(vma))
                                continue;
                        /*
                         * Writing 1 to /proc/pid/clear_refs affects all
                         * pages.
                         *
                         * Writing 2 to /proc/pid/clear_refs only affects
                         * anonymous pages.
                         *
                         * Writing 3 to /proc/pid/clear_refs only affects
                         * file-mapped pages.
                         */
                        if (type == CLEAR_REFS_ANON && vma->vm_file)
                                continue;
                        if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
                                continue;
                        walk_page_range(vma->vm_start, vma->vm_end,
                                        &clear_refs_walk);
                }
                flush_tlb_mm(mm);
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        put_task_struct(task);

        return count;
}

const struct file_operations proc_clear_refs_operations = {
        .write          = clear_refs_write,
        .llseek         = noop_llseek,
};

struct pagemapread {
        int pos, len;
        u64 *buffer;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1
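
/*
 * How the macros above compose an entry (illustrative, assuming 4K
 * pages): a present page at pfn 0x1234 is encoded as
 * PM_PFRAME(0x1234) | PM_PSHIFT(12) | PM_PRESENT, i.e. status bit 63
 * set, page shift 12 in bits 55-60, and the pfn in the low 55 bits.
 */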

static int add_to_pagemap(unsigned long addr, u64 pfn,
                          struct pagemapread *pm)
{
        pm->buffer[pm->pos++] = pfn;
        if (pm->pos >= pm->len)
                return PM_END_OF_BUFFER;
        return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
                            struct mm_walk *walk)
{
        struct pagemapread *pm = walk->private;
        unsigned long addr;
        int err = 0;
        for (addr = start; addr < end; addr += PAGE_SIZE) {
                err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
                if (err)
                        break;
        }
        return err;
}

static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
        swp_entry_t e = pte_to_swp_entry(pte);
        return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static u64 pte_to_pagemap_entry(pte_t pte)
{
        u64 pme = 0;
        if (is_swap_pte(pte))
                pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
                        | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
        else if (pte_present(pte))
                pme = PM_PFRAME(pte_pfn(pte))
                        | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
        return pme;
}

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                             struct mm_walk *walk)
{
        struct vm_area_struct *vma;
        struct pagemapread *pm = walk->private;
        pte_t *pte;
        int err = 0;

        /* find the first VMA at or above 'addr' */
        vma = find_vma(walk->mm, addr);
        for (; addr != end; addr += PAGE_SIZE) {
                u64 pfn = PM_NOT_PRESENT;

                /* check to see if we've left 'vma' behind
                 * and need a new, higher one */
                if (vma && (addr >= vma->vm_end))
                        vma = find_vma(walk->mm, addr);

                /* check that 'vma' actually covers this address,
                 * and that it isn't a huge page vma */
                if (vma && (vma->vm_start <= addr) &&
                    !is_vm_hugetlb_page(vma)) {
                        pte = pte_offset_map(pmd, addr);
                        pfn = pte_to_pagemap_entry(*pte);
                        /* unmap before userspace copy */
                        pte_unmap(pte);
                }
                err = add_to_pagemap(addr, pfn, pm);
                if (err)
                        return err;
        }

        cond_resched();

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
        u64 pme = 0;
        if (pte_present(pte))
                pme = PM_PFRAME(pte_pfn(pte) + offset)
                        | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
        return pme;
}

/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
                                 unsigned long addr, unsigned long end,
                                 struct mm_walk *walk)
{
        struct pagemapread *pm = walk->private;
        int err = 0;
        u64 pfn;

        for (; addr != end; addr += PAGE_SIZE) {
                int offset = (addr & ~hmask) >> PAGE_SHIFT;
                pfn = huge_pte_to_pagemap_entry(*pte, offset);
                err = add_to_pagemap(addr, pfn, pm);
                if (err)
                        return err;
        }

        cond_resched();

        return err;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap.  Unmapped pages return a null PFN.  This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
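
/*
 * Sketch of a userspace reader (illustrative, not part of this file):
 * to query the entry for virtual address vaddr, seek to
 * (vaddr / PAGE_SIZE) * 8 in /proc/<pid>/pagemap and read 8 bytes:
 *
 *      uint64_t ent;
 *      int fd = open("/proc/self/pagemap", O_RDONLY);
 *      pread(fd, &ent, 8, (vaddr / 4096) * 8);
 *      if (ent & (1ULL << 63))
 *              pfn = ent & ((1ULL << 55) - 1);
 */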
#define PAGEMAP_WALK_SIZE   (PMD_SIZE)
#define PAGEMAP_WALK_MASK   (PMD_MASK)
static ssize_t pagemap_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
        struct mm_struct *mm;
        struct pagemapread pm;
        int ret = -ESRCH;
        struct mm_walk pagemap_walk = {};
        unsigned long src;
        unsigned long svpfn;
        unsigned long start_vaddr;
        unsigned long end_vaddr;
        int copied = 0;

        if (!task)
                goto out;

        ret = -EACCES;
        if (!ptrace_may_access(task, PTRACE_MODE_READ))
                goto out_task;

        ret = -EINVAL;
        /* file position must be aligned */
        if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
                goto out_task;

        ret = 0;

        if (!count)
                goto out_task;

        mm = get_task_mm(task);
        if (!mm)
                goto out_task;

        pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
        pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
        ret = -ENOMEM;
        if (!pm.buffer)
                goto out_mm;

        pagemap_walk.pmd_entry = pagemap_pte_range;
        pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
        pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
        pagemap_walk.mm = mm;
        pagemap_walk.private = &pm;

        src = *ppos;
        svpfn = src / PM_ENTRY_BYTES;
        start_vaddr = svpfn << PAGE_SHIFT;
        end_vaddr = TASK_SIZE_OF(task);

        /* watch out for wraparound */
        if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
                start_vaddr = end_vaddr;

        /*
         * The odds are that this will stop walking way
         * before end_vaddr, because the length of the
         * user buffer is tracked in "pm", and the walk
         * will stop when we hit the end of the buffer.
         */
        ret = 0;
        while (count && (start_vaddr < end_vaddr)) {
                int len;
                unsigned long end;

                pm.pos = 0;
                end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
                /* overflow ? */
                if (end < start_vaddr || end > end_vaddr)
                        end = end_vaddr;
                down_read(&mm->mmap_sem);
                ret = walk_page_range(start_vaddr, end, &pagemap_walk);
                up_read(&mm->mmap_sem);
                start_vaddr = end;

                len = min(count, PM_ENTRY_BYTES * pm.pos);
                if (copy_to_user(buf, pm.buffer, len)) {
                        ret = -EFAULT;
                        goto out_free;
                }
                copied += len;
                buf += len;
                count -= len;
        }
        *ppos += copied;
        if (!ret || ret == PM_END_OF_BUFFER)
                ret = copied;

out_free:
        kfree(pm.buffer);
out_mm:
        mmput(mm);
out_task:
        put_task_struct(task);
out:
        return ret;
}

const struct file_operations proc_pagemap_operations = {
        .llseek         = mem_lseek, /* borrow this */
        .read           = pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static const struct seq_operations proc_pid_numa_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
        .open           = numa_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};
#endif