#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H

/*
 * include/asm-s390/rwsem.h
 *
 * S390 version
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

/*
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
 * uncontended lock. This can be determined because XADD returns the old value.
 * Readers increment by 1 and see a positive value when uncontended, negative
 * if there are writers (and maybe readers) waiting (in which case the reader
 * goes to sleep).
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */
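
/*
 * Illustrative only (not part of the original header): a minimal user-space
 * sketch of the counting scheme described above, using the 32-bit
 * (!__s390x__) bias constants and plain arithmetic.  It checks the numbers
 * quoted in the comment, e.g. that an uncontended writer takes the count
 * from 0 to 0xffff0001.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
        const int32_t active_bias = 0x00000001;         /* RWSEM_ACTIVE_BIAS */
        const int32_t waiting_bias = -0x00010000;       /* RWSEM_WAITING_BIAS */
        const int32_t active_mask = 0x0000ffff;         /* RWSEM_ACTIVE_MASK */
        int32_t count = 0;                              /* RWSEM_UNLOCKED_VALUE */

        /* Uncontended reader: add ACTIVE_READ_BIAS, result stays positive. */
        count += active_bias;
        assert(count > 0);
        count -= active_bias;                           /* reader unlocks again */

        /* Uncontended writer: add ACTIVE_WRITE_BIAS (= WAITING_BIAS + ACTIVE_BIAS).
         * The new value is negative, 0xffff0001 when viewed as unsigned,
         * with exactly one active lock in the low halfword. */
        count += waiting_bias + active_bias;
        assert((uint32_t)count == 0xffff0001);
        assert((count & active_mask) == 1);

        return 0;
}
#endif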

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);

/*
 * the semaphore definition
 */
struct rw_semaphore {
        signed long             count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};

#ifndef __s390x__
#define RWSEM_UNLOCKED_VALUE    0x00000000
#define RWSEM_ACTIVE_BIAS       0x00000001
#define RWSEM_ACTIVE_MASK       0x0000ffff
#define RWSEM_WAITING_BIAS      (-0x00010000)
#else /* __s390x__ */
#define RWSEM_UNLOCKED_VALUE    0x0000000000000000L
#define RWSEM_ACTIVE_BIAS       0x0000000000000001L
#define RWSEM_ACTIVE_MASK       0x00000000ffffffffL
#define RWSEM_WAITING_BIAS      (-0x0000000100000000L)
#endif /* __s390x__ */
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * initialisation
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

static inline void init_rwsem(struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);

#define init_rwsem(sem)                                 \
do {                                                    \
        static struct lock_class_key __key;             \
                                                        \
        __init_rwsem((sem), #sem, &__key);              \
} while (0)

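/*
 * Illustrative only (not part of the original header): typical callers
 * either declare a semaphore statically with DECLARE_RWSEM() or embed a
 * struct rw_semaphore in their own structure and initialize it with
 * init_rwsem() before first use.  The names below are made up for the
 * example.
 */
#if 0
static DECLARE_RWSEM(example_sem);      /* statically initialized */

struct example_dev {
        struct rw_semaphore lock;
};

static void example_dev_init(struct example_dev *dev)
{
        init_rwsem(&dev->lock);         /* dynamically initialized */
}
#endif
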
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        signed long old, new;

        asm volatile(
#ifndef __s390x__
                " l %0,0(%3)\n"
                "0: lr %1,%0\n"
                " ahi %1,%5\n"
                " cs %0,%1,0(%3)\n"
                " jl 0b"
#else /* __s390x__ */
                " lg %0,0(%3)\n"
                "0: lgr %1,%0\n"
                " aghi %1,%5\n"
                " csg %0,%1,0(%3)\n"
                " jl 0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count),
                  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
        if (old < 0)
                rwsem_down_read_failed(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        signed long old, new;

        asm volatile(
#ifndef __s390x__
                " l %0,0(%3)\n"
                "0: ltr %1,%0\n"
                " jm 1f\n"
                " ahi %1,%5\n"
                " cs %0,%1,0(%3)\n"
                " jl 0b\n"
                "1:"
#else /* __s390x__ */
                " lg %0,0(%3)\n"
                "0: ltgr %1,%0\n"
                " jm 1f\n"
                " aghi %1,%5\n"
                " csg %0,%1,0(%3)\n"
                " jl 0b\n"
                "1:"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count),
                  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
        return old >= 0 ? 1 : 0;
}

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        signed long old, new, tmp;

        tmp = RWSEM_ACTIVE_WRITE_BIAS;
        asm volatile(
#ifndef __s390x__
                " l %0,0(%3)\n"
                "0: lr %1,%0\n"
                " a %1,%5\n"
                " cs %0,%1,0(%3)\n"
                " jl 0b"
#else /* __s390x__ */
                " lg %0,0(%3)\n"
                "0: lgr %1,%0\n"
                " ag %1,%5\n"
                " csg %0,%1,0(%3)\n"
                " jl 0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count), "m" (tmp)
                : "cc", "memory");
        if (old != 0)
                rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        signed long old;

        asm volatile(
#ifndef __s390x__
                " l %0,0(%2)\n"
                "0: ltr %0,%0\n"
                " jnz 1f\n"
                " cs %0,%4,0(%2)\n"
                " jl 0b\n"
#else /* __s390x__ */
                " lg %0,0(%2)\n"
                "0: ltgr %0,%0\n"
                " jnz 1f\n"
                " csg %0,%4,0(%2)\n"
                " jl 0b\n"
#endif /* __s390x__ */
                "1:"
                : "=&d" (old), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count),
                  "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory");
        return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        signed long old, new;

        asm volatile(
#ifndef __s390x__
                " l %0,0(%3)\n"
                "0: lr %1,%0\n"
                " ahi %1,%5\n"
                " cs %0,%1,0(%3)\n"
                " jl 0b"
#else /* __s390x__ */
                " lg %0,0(%3)\n"
                "0: lgr %1,%0\n"
                " aghi %1,%5\n"
                " csg %0,%1,0(%3)\n"
                " jl 0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count),
                  "i" (-RWSEM_ACTIVE_READ_BIAS)
                : "cc", "memory");
        if (new < 0)
                if ((new & RWSEM_ACTIVE_MASK) == 0)
                        rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        signed long old, new, tmp;

        tmp = -RWSEM_ACTIVE_WRITE_BIAS;
        asm volatile(
#ifndef __s390x__
                " l %0,0(%3)\n"
                "0: lr %1,%0\n"
                " a %1,%5\n"
                " cs %0,%1,0(%3)\n"
                " jl 0b"
#else /* __s390x__ */
                " lg %0,0(%3)\n"
                "0: lgr %1,%0\n"
                " ag %1,%5\n"
                " csg %0,%1,0(%3)\n"
                " jl 0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count), "m" (tmp)
                : "cc", "memory");
        if (new < 0)
                if ((new & RWSEM_ACTIVE_MASK) == 0)
                        rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        signed long old, new, tmp;

        tmp = -RWSEM_WAITING_BIAS;
        asm volatile(
#ifndef __s390x__
                " l %0,0(%3)\n"
                "0: lr %1,%0\n"
                " a %1,%5\n"
                " cs %0,%1,0(%3)\n"
                " jl 0b"
#else /* __s390x__ */
                " lg %0,0(%3)\n"
                "0: lgr %1,%0\n"
                " ag %1,%5\n"
                " csg %0,%1,0(%3)\n"
                " jl 0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count), "m" (tmp)
                : "cc", "memory");
        if (new > 1)
                rwsem_downgrade_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
        signed long old, new;

        asm volatile(
#ifndef __s390x__
                " l %0,0(%3)\n"
                "0: lr %1,%0\n"
                " ar %1,%5\n"
                " cs %0,%1,0(%3)\n"
                " jl 0b"
#else /* __s390x__ */
                " lg %0,0(%3)\n"
                "0: lgr %1,%0\n"
                " agr %1,%5\n"
                " csg %0,%1,0(%3)\n"
                " jl 0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count), "d" (delta)
                : "cc", "memory");
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
        signed long old, new;

        asm volatile(
#ifndef __s390x__
                " l %0,0(%3)\n"
                "0: lr %1,%0\n"
                " ar %1,%5\n"
                " cs %0,%1,0(%3)\n"
                " jl 0b"
#else /* __s390x__ */
                " lg %0,0(%3)\n"
                "0: lgr %1,%0\n"
                " agr %1,%5\n"
                " csg %0,%1,0(%3)\n"
                " jl 0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count), "d" (delta)
                : "cc", "memory");
        return new;
}
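
/*
 * Illustrative only (not part of the original header): rwsem_atomic_update()
 * behaves like an atomic add that returns the resulting count.  A rough
 * user-space analogue using the GCC __atomic builtins (an assumption for
 * illustration, not how the kernel builds it) would be:
 */
#if 0
static long example_atomic_update(long delta, long *count)
{
        /* add delta atomically and return the new value, like the CS/CSG
         * loop above */
        return __atomic_add_fetch(count, delta, __ATOMIC_SEQ_CST);
}
#endif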

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */