/* fs/proc/task_mmu.c */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
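	/*
	 * Counters kept in pages are converted to kB below with
	 * "<< (PAGE_SHIFT - 10)", i.e. multiply by PAGE_SIZE then divide
	 * by 1024; byte quantities such as the text span already used
	 * ">> 10" directly above.
	 */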
	buffer += sprintf(buffer,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}
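
/*
 * task_statm() feeds /proc/<pid>/statm: all values are in pages, and
 * "resident" is the sum of the file-backed (file_rss) and anonymous
 * (anon_rss) RSS counters.
 */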
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}

/*
 * proc_exe_link() resolves /proc/<pid>/exe: the first file-backed VMA
 * flagged VM_EXECUTABLE is the mapping of the binary that was exec'd.
 */
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct *vma;
	int result = -ENOENT;
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct *mm = NULL;

	if (task) {
		mm = get_task_mm(task);
		put_task_struct(task);
	}
	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_vfsmnt);
		*dentry = dget(vma->vm_file->f_dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

/*
 * Pad the line so the file name (or the [heap]/[stack] marker) starts in
 * a fixed column: the three %08lx fields account for roughly six times
 * the hex width of a pointer, plus 25 bytes of punctuation, permission
 * bits and the device/inode columns.
 */
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

struct mem_size_stats
{
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
};
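
/*
 * All mem_size_stats fields accumulate byte counts (PAGE_SIZE per mapped
 * page) and are shifted down by 10 when printed as kB.
 */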

static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
	} else {
		if (mm) {
			if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
				pad_len_spaces(m, len);
				seq_puts(m, "[heap]");
			} else {
				if (vma->vm_start <= mm->start_stack &&
					    vma->vm_end >= mm->start_stack) {

					pad_len_spaces(m, len);
					seq_puts(m, "[stack]");
				}
			}
		} else {
			pad_len_spaces(m, len);
			seq_puts(m, "[vdso]");
		}
	}
	seq_putc(m, '\n');

	if (mss)
		seq_printf(m,
			   "Size:          %8lu kB\n"
			   "Rss:           %8lu kB\n"
			   "Shared_Clean:  %8lu kB\n"
			   "Shared_Dirty:  %8lu kB\n"
			   "Private_Clean: %8lu kB\n"
			   "Private_Dirty: %8lu kB\n",
			   (vma->vm_end - vma->vm_start) >> 10,
			   mss->resident >> 10,
			   mss->shared_clean >> 10,
			   mss->shared_dirty >> 10,
			   mss->private_clean >> 10,
			   mss->private_dirty >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}
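
/*
 * Illustrative output only (format as produced above, values invented):
 *
 *	08048000-0804c000 r-xp 00000000 03:01 12345      /bin/cat
 *
 * For smaps, the Size/Rss/Shared_*/Private_* block follows each such line.
 */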

static int show_map(struct seq_file *m, void *v)
{
	return show_map_internal(m, v, NULL);
}

static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Mapped by more than one pte: count the page as shared. */
		if (page_mapcount(page) >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
}

static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		smaps_pte_range(vma, pmd, addr, next, mss);
	} while (pmd++, addr = next, addr != end);
}

static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		smaps_pmd_range(vma, pud, addr, next, mss);
	} while (pud++, addr = next, addr != end);
}

static inline void smaps_pgd_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		smaps_pud_range(vma, pgd, addr, next, mss);
	} while (pgd++, addr = next, addr != end);
}
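
/*
 * The walkers above follow the kernel's four-level page table layout
 * (pgd -> pud -> pmd -> pte); architectures with fewer levels fold the
 * unused levels away, collapsing the intermediate loops to single passes.
 */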

static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof mss);
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
	return show_map_internal(m, v, &mss);
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = get_task_mm(priv->task);
	if (!mm)
		return NULL;

	priv->tail_vma = tail_vma = get_gate_vma(priv->task);
	down_read(&mm->mmap_sem);

	/* Start with last addr hint */
	if (last_addr && (vma = find_vma(mm, last_addr))) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}
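
/*
 * seq_file may invoke m_start() many times for one open file as the user
 * buffer fills; the m->version cookie saved in show_map_internal() lets a
 * restart jump to the next VMA with find_vma() rather than rescanning the
 * list from mm->mmap.
 */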

/*
 * m_start() leaves mmap_sem held while real VMAs are being walked; once
 * iteration reaches the tail (gate) VMA the lock and the mm reference
 * have already been dropped, so vma_stop() must only release them for a
 * genuine VMA.
 */
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int do_maps_open(struct inode *inode, struct file *file,
			struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
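
/*
 * Usage sketch (userspace, assuming /proc is mounted): each read streams
 * VMAs through the seq_file iterator above, e.g.
 *
 *	$ cat /proc/self/maps
 *	$ cat /proc/self/smaps
 *	$ cat /proc/self/numa_maps	(CONFIG_NUMA kernels only)
 */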