/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL control operations.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *     Thus: Perfect SMP scaling between independent semaphore arrays.
 *     If multiple semaphores in one array are used, then cache line
 *     thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare(),
 *   wake_up_sem_queue_do())
 * - All work is done by the waker; the woken-up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken-up task may not even touch the semaphore array anymore; it may
 *   have been destroyed already by a semctl(RMID).
 * - The synchronization between wake-ups due to a timeout/signal and a
 *   wake-up due to a completed semaphore operation is achieved by using an
 *   intermediate state (IN_WAKEUP).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This allows achieving
 *   FIFO ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
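/*
 * Illustrative user-space sketch (not kernel code): the behavior described
 * above, seen from user space. It assumes a one-semaphore set identified by
 * "semid" already exists; the names are only for the example:
 *
 *	struct sembuf op = {
 *		.sem_num = 0,
 *		.sem_op  = -1,
 *		.sem_flg = SEM_UNDO,
 *	};
 *
 *	if (semop(semid, &op, 1) == -1)
 *		perror("semop");
 *
 * The SEM_UNDO flag is what creates the per-process undo adjustment that
 * exit_sem() below applies (clamped to 0..SEMVMX) when the task exits.
 */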
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>
#include "util.h"
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	/*
	 * PID of the process that last modified the semaphore. For
	 * Linux, specifically these are:
	 *  - semop
	 *  - semctl, via SETVAL and SETALL.
	 *  - at task exit when performing undo adjustments (see exit_sem).
	 */
	int	sempid;
	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter; /* pending single-sop operations */
					/* that alter the semaphore */
	struct list_head pending_const; /* pending single-sop operations */
					/* that do not alter the semaphore */
	time_t	sem_otime;	/* candidate for sem_otime */
} ____cacheline_aligned_in_smp;
/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	int			pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	struct sembuf		*blocking; /* the operation that blocked */
	int			nsops;	 /* number of operations */
	int			alter;	 /* does *sops alter the array? */
};
/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};
/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among tasks in the same CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	atomic_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};
#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif
#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
/*
 * Locking:
 *	sem_undo.id_next,
 *	sem_array.complex_count,
 *	sem_array.pending{_alter,_const},
 *	sem_array.sem_undo: global sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 *
 *	sem_array.sem_base[i].pending_{const,alter}:
 *		global or semaphore sem_lock() for read/write
 */
#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]
void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif
void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operations back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		curr = &sma->sem_base[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}
/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
	int i;

	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}
}
static void sem_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct sem_array *sma = ipc_rcu_to_struct(p);

	security_sem_free(sma);
	ipc_rcu_free(head);
}
/*
 * Wait until all currently ongoing simple ops have completed.
 * Caller must own sem_perm.lock.
 * New simple ops cannot start, because simple ops first check
 * that a) sem_perm.lock is free and b) complex_count is 0.
 */
static void sem_wait_array(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->complex_count) {
		/* The thread that increased sma->complex_count waited on
		 * all sem->lock locks. Thus we don't need to wait again.
		 */
		return;
	}

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = sma->sem_base + i;
		spin_unlock_wait(&sem->lock);
	}
}
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* And wait until all simple ops that are processed
		 * right now have dropped their locks.
		 */
		sem_wait_array(sma);
		return -1;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * The rules are:
	 * - optimized locking is possible if no complex operation
	 *   is either enqueued or processed right now.
	 * - The test for enqueued complex ops is simple:
	 *	sma->complex_count != 0
	 * - Testing for complex ops that are processed right now is
	 *   a bit more difficult. Complex ops acquire the full lock
	 *   and first wait that the running simple ops have completed.
	 *
	 * Thus: If we own a simple lock and the global lock is free
	 *	and complex_count is now 0, then it will stay 0 and
	 *	thus just locking sem->lock is sufficient.
	 */
	sem = sma->sem_base + sops->sem_num;

	if (sma->complex_count == 0) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* Then check that the global lock is free */
		if (!spin_is_locked(&sma->sem_perm.lock)) {
			/*
			 * We need a memory barrier with acquire semantics,
			 * otherwise we can race with another thread that does:
			 *	complex_count++;
			 *	spin_unlock(sem_perm.lock);
			 */
			smp_acquire__after_ctrl_dep();

			/*
			 * Now repeat the test of complex_count:
			 * It can't change anymore until we drop sem->lock.
			 * Thus: if it is now 0, then it will stay 0.
			 */
			if (sma->complex_count == 0) {
				/* fast path successful! */
				return sops->sem_num;
			}
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->complex_count == 0) {
		/* False alarm:
		 * There is no complex operation, thus we can switch
		 * back to the fast path.
		 */
		spin_lock(&sem->lock);
		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/* Not a false alarm, thus complete the sequence for a
		 * full lock.
		 */
		sem_wait_array(sma);
		return -1;
	}
}
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == -1) {
		unmerge_queues(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = sma->sem_base + locknum;
		spin_unlock(&sem->lock);
	}
}
/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
			int id, struct sembuf *sops, int nsops, int *locknum)
{
	struct kern_ipc_perm *ipcp;
	struct sem_array *sma;

	ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);
	*locknum = sem_lock(sma, sops, nsops);

	/* ipc_rmid() may have already freed the ID while sem_lock
	 * was spinning: verify that the structure is still valid
	 */
	if (ipc_valid_object(ipcp))
		return container_of(ipcp, struct sem_array, sem_perm);

	sem_unlock(sma, *locknum);
	return ERR_PTR(-EINVAL);
}
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(sma, sem_rcu_free);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from the pending list
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* call wake_up_process
 *	* set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the sem array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 */
#define IN_WAKEUP	1
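/*
 * Illustrative sketch, not additional kernel API: the two-stage handshake
 * above in miniature. The waker side is:
 *
 *	q->status = IN_WAKEUP;		(1) a result value is imminent
 *	wake_up_process(q->sleeper);	(2) safe: the task cannot exit yet
 *	q->status = error;		(3) final value; task may now vanish
 *
 * and the sleeper side is the loop in get_queue_result() below:
 *
 *	error = q->status;
 *	while (unlikely(error == IN_WAKEUP)) {
 *		cpu_relax();
 *		error = q->status;
 *	}
 */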
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof(*sma) + nsems * sizeof(struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;

	memset(sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma, ipc_rcu_free);
		return retval;
	}

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
		INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
		spin_lock_init(&sma->sem_base[i].lock);
	}

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		ipc_rcu_putref(sma, sem_rcu_free);
		return id;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}
/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops sem_ops = {
		.getnew = newary,
		.associate = sem_security,
		.more_checks = sem_more_checks,
	};
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
/**
 * perform_atomic_semop - Perform (if possible) a semaphore operation
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Negative values are error codes.
 */
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops, pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	sop--;
	pid = q->pid;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sem_base[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}
/** wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
 * @pt: list head of the tasks that must be woken up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have the
		 * wakee busy-wait until we're scheduled back on.
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	q->pid = error;

	list_add_tail(&q->list, pt);
}
/**
 * wake_up_sem_queue_do - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as the
 * status is set to the actual return code.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after writing q->status. */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented to value - thus they won't proceed either.
	 */
	return 0;
}
/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
				struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sem_base[semnum].pending_const;

	walk = pending_list->next;
	while (walk != pending_list) {
		int error;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		error = perform_atomic_semop(sma, q);

		if (error <= 0) {
			/* operation completed, remove from queue & wakeup */

			unlink_queue(sma, q);

			wake_up_sem_queue_prepare(pt, q, error);
			if (error == 0)
				semop_completed = 1;
		}
	}
	return semop_completed;
}
/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @pt: list head of the tasks that must be woken up.
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
					int nsops, struct list_head *pt)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sem_base[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, pt);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sem_base[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, pt);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, pt);

	return semop_completed;
}
/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sem_base[semnum].pending_alter;

again:
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per-semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}
/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sem_base[0].sem_otime = get_seconds();
	} else {
		sma->sem_base[sops[0].sem_num].sem_otime =
							get_seconds();
	}
}
/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			int otime, struct list_head *pt)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, pt);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, pt);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   stored in per-semaphore lists.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: if the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							sops[i].sem_num, pt);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
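/*
 * Typical caller pattern for do_smart_update() (a condensed sketch of what
 * semctl_setval() and semtimedop() below actually do): collect the wakeups
 * while holding the locks, then drop all locks before the wake-up calls:
 *
 *	INIT_LIST_HEAD(&tasks);
 *	sem_lock(sma, NULL, -1);
 *	... modify the semaphore array ...
 *	do_smart_update(sma, NULL, 0, 0, &tasks);
 *	sem_unlock(sma, -1);
 *	rcu_read_unlock();
 *	wake_up_sem_queue_do(&tasks);
 */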
/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
			bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior.
	 * Give the administrators a chance to notice that an application
	 * might misbehave because it relies on the Linux behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
			current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}
/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * Per definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operations would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sem_base[semnum].pending_const;
	else
		l = &sma->sem_base[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/* all tasks on a per-semaphore list sleep on exactly
		 * that semaphore
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
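/*
 * Example (illustrative values): a task blocked in semop() with the two
 * operations {.sem_num = 1, .sem_op = 0} and {.sem_num = 2, .sem_op = -1}
 * sleeps on the first operation that could not proceed, so q->blocking
 * points at the wait-for-zero op on semaphore 1. It is therefore counted
 * only in the semzcnt of semaphore 1, never in the semncnt of semaphore 2.
 */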
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;
	int i;

	/* Free the existing undo structures for this semaphore set. */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(sma, sem_rcu_free);
}
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static time_t get_semotime(struct sem_array *sma)
{
	int i;
	time_t res;

	res = sma->sem_base[0].sem_otime;
	for (i = 1; i < sma->sem_nsems; i++) {
		time_t to = sma->sem_base[i].sem_otime;

		if (to > res)
			res = to;
	}
	return res;
}
static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, void __user *p)
{
	int err;
	struct sem_array *sma;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo, 0, sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rwsem);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rwsem);
		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id = 0;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == SEM_STAT) {
			sma = sem_obtain_object(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
			id = sma->sem_perm.id;
		} else {
			sma = sem_obtain_object_check(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = get_semotime(sma);
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		rcu_read_unlock();
		if (copy_semid_to_user(p, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	rcu_read_unlock();
	return err;
}
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		unsigned long arg)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err, val;
	struct list_head tasks;

#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
	/* big-endian 64bit */
	val = arg >> 32;
#else
	/* 32bit or little-endian 64bit */
	val = arg;
#endif

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(sma, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	curr = &sma->sem_base[semnum];

	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	curr->sempid = task_tgid_vnr(current);
	sma->sem_ctime = get_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &tasks);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
	return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			if (!ipc_rcu_getref(sma)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, sem_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(sma)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, sem_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(sma, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(sma, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++) {
			sma->sem_base[i].semval = sem_io[i];
			sma->sem_base[i].sempid = task_tgid_vnr(current);
		}

		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}
	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if (sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, void __user *p)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if (cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
	}

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64.sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64.sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		return semctl_nolock(ns, semid, cmd, version, p);
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_RMID:
	case IPC_SET:
		return semctl_down(ns, semid, cmd, version, p);
	default:
		return -EINVAL;
	}
}
/* If the task doesn't already have an undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE.
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
/**
 * find_alloc_undo - lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(sma)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(sma, sem_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
/**
 * get_queue_result - retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * signal) and in parallel the task is woken up by another task because it got
 * the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
	int error;

	error = q->status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q->status;
	}

	return error;
}
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max, locknum;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;
	struct list_head tasks;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}
	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	INIT_LIST_HEAD(&tasks);

	if (undos) {
		/* On success, find_alloc_undo takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_rcu_wakeup;

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_rcu_wakeup;

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	/*
	 * We eventually might perform the following check in a lockless
	 * fashion, considering ipc_valid_object() locking constraints.
	 * If nsops == 1 and there is no contention for sem_perm.lock, then
	 * only a per-semaphore lock is held and it's OK to proceed with the
	 * check below. More details on the fine grained locking scheme
	 * entangled here and why it's RMID race safe on comments at sem_lock()
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array received the same id. Check and fail.
	 * This case can be detected checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	if (un && un->semid == -1)
		goto out_unlock_free;

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) {
		/* If the operation was successful, then do
		 * the required updates.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &tasks);
		else
			set_semotime(sma, sops);
	}
	if (error <= 0)
		goto out_unlock_free;

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter) {
			if (sma->complex_count) {
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {
				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;

sleep_again:
	__set_current_state(TASK_INTERRUPTIBLE);
	sem_unlock(sma, locknum);
	rcu_read_unlock();

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = get_queue_result(&queue);

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources.
		 * Perform a smp_mb(): User space could assume that semop()
		 * is a memory barrier: Without the mb(), the cpu could
		 * speculatively read in user space stale data that was
		 * overwritten by the previous owner of the semaphore.
		 */
		smp_mb();

		goto out_free;
	}

	rcu_read_lock();
	sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);

	/*
	 * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
	 */
	error = get_queue_result(&queue);

	/*
	 * Array removed? If yes, leave without sem_unlock().
	 */
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we are woken up by another process.
	 * Leave without unlink_queue(), but with sem_unlock().
	 */
	if (error != -EINTR)
		goto out_unlock_free;

	/*
	 * If an interrupt occurred we have to clean up the queue.
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;

	/*
	 * If the wakeup was spurious, just retry.
	 */
	if (error == -EINTR && !signal_pending(current))
		goto sleep_again;

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if (sops != fast_sops)
		kfree(sops);
	return error;
}
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		struct list_head tasks;
		int semid, i;

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/*
			 * We must wait for freeary() before freeing this ulp,
			 * in case we raced with last sem_undo. There is a small
			 * possibility where we exit while freeary() didn't
			 * finish unlocking sem_undo_list.
			 */
			spin_unlock_wait(&ulp->lock);
			rcu_read_unlock();
			break;
		}
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (semid == -1) {
			rcu_read_unlock();
			continue;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from the linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		/* we are the last process using this ulp, acquiring ulp->lock
		 * isn't required. Besides that, we are also protected against
		 * IPC_RMID as we hold sma->sem_perm lock now
		 */
		list_del_rcu(&un->list_proc);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		/* maybe some queued-up processes were waiting for this */
		INIT_LIST_HEAD(&tasks);
		do_smart_update(sma, NULL, 0, 1, &tasks);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_sem_queue_do(&tasks);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}
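/*
 * Worked example for exit_sem() above (values invented for illustration):
 * a task does a SEM_UNDO decrement of 1 on semaphore 0, so
 * perform_atomic_semop() stores semadj[0] = 0 - (-1) = +1. If the task then
 * dies without the matching increment, the loop above applies
 * semval += semadj[0] and thereby restores the semaphore, with the result
 * clamped to 0..SEMVMX as described in the comment inside the loop.
 */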
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct sem_array *sma = it;
	time_t sem_otime;

	/*
	 * The proc interface isn't aware of sem_lock(), it calls
	 * ipc_lock_object() directly (in sysvipc_find_ipc).
	 * In order to stay compatible with sem_lock(), we must wait until
	 * all simple semop() calls have left their critical regions.
	 */
	sem_wait_array(sma);

	sem_otime = get_semotime(sma);

	seq_printf(s,
		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
		   sma->sem_perm.key,
		   sma->sem_perm.id,
		   sma->sem_perm.mode,
		   sma->sem_nsems,
		   from_kuid_munged(user_ns, sma->sem_perm.uid),
		   from_kgid_munged(user_ns, sma->sem_perm.gid),
		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
		   from_kgid_munged(user_ns, sma->sem_perm.cgid),