ipc,shm: introduce lockless functions to obtain the ipc object
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

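/*
 * shm_file_data() deliberately expands to an lvalue of type
 * struct shm_file_data *, so callers can both read and assign
 * file->private_data through it (see shm_release() below).
 */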
#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

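/*
 * Example (editorial sketch, not part of the original source): the two
 * helpers above look the object up under RCU without taking its lock, so
 * a hypothetical read-mostly caller would be structured as:
 *
 *	rcu_read_lock();
 *	shp = shm_obtain_object_check(ns, shmid);
 *	if (IS_ERR(shp)) {
 *		err = PTR_ERR(shp);
 *		goto out_unlock;
 *	}
 *	... read-only access to shp ...
 *	rcu_read_unlock();
 *
 * Anyone who needs to modify the object must still take the per-object
 * lock, e.g. with ipc_lock_object() or shm_lock_by_ptr() below.
 */
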
/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

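/*
 * Lock an object that the caller already holds a valid pointer to (for
 * instance one found via idr_for_each() while holding the rw_mutex).
 * rcu_read_lock() is taken first so that a subsequent shm_unlock() can
 * drop both the object lock and the RCU read section.
 */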
static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(file_inode(shp->shm_file)->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_creator != current)
		return 0;

	/*
	 * Mark it as orphaned to destroy the segment when
	 * kernel.shm_rmid_forced is changed.
	 * It is a noop if the following shm_may_destroy() returns true.
	 */
	shp->shm_creator = NULL;

	/*
	 * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
	 * is not set, it shouldn't be deleted here.
	 */
	if (!ns->shm_rmid_forced)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and whose originating
	 * process has already exited.
	 *
	 * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

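/*
 * Destroy all orphaned segments in the namespace. In this version of the
 * code this is triggered when the kernel.shm_rmid_forced sysctl is turned
 * on (see shm_rmid_forced above).
 */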
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rw_mutex);
}


void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all already created segments, but not mapped yet */
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

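/*
 * The file installed into the vma by do_shmat() is only a thin wrapper:
 * mmap is delegated to the underlying shmem or hugetlbfs file, and
 * vma->vm_ops is then replaced with shm_vm_ops so that attaches and
 * detaches are accounted through shm_open()/shm_close() above.
 */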
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

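/*
 * Example (editorial sketch, not part of the original source): from
 * userspace this path is reached with a call such as
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *
 * ipcget() then either validates an existing key against shm_security()
 * and shm_more_checks(), or calls newseg() to create the segment.
 */
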
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rw_mutex);
	rcu_read_lock();

	ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
			       &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		/* the ipc lock is not held upon failure */
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock0;

	switch (cmd) {
	case IPC_RMID:
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}

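/*
 * Example (editorial sketch, not part of the original source): the
 * IPC_RMID path above is what a userspace call like
 *
 *	shmctl(id, IPC_RMID, NULL);
 *
 * ends up in; the segment is destroyed immediately if it has no
 * attaches, otherwise it is marked SHM_DEST and destroyed on last
 * detach (see do_shm_rmid() and shm_close()).
 */
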
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();
			err = -EPERM;
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid))
				goto out_unlock;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		shm_unlock(shp);
		shmem_unlock_mapping(shm_file->f_mapping);
		fput(shm_file);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	path_put(&path);
	goto out_nattch;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

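/*
 * Example (editorial sketch, not part of the original source): a typical
 * userspace attach/detach pair that exercises do_shmat() and, through
 * shm_close(), the detach accounting:
 *
 *	void *p = shmat(id, NULL, 0);
 *	if (p != (void *)-1) {
 *		... use the mapping ...
 *		shmdt(p);
 *	}
 */
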
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = file_inode(vma->vm_file)->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d %4o " SIZE_SPEC " %5u %5u "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  from_kuid_munged(user_ns, shp->shm_perm.uid),
			  from_kgid_munged(user_ns, shp->shm_perm.gid),
			  from_kuid_munged(user_ns, shp->shm_perm.cuid),
			  from_kgid_munged(user_ns, shp->shm_perm.cgid),
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif