arch/s390/lib/spinlock.c
/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

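/*
 * Number of busy-wait iterations before giving up and yielding to the
 * lock owner; tunable at boot time via the "spin_retry=" parameter.
 */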
int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

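/*
 * Emit the compare-and-delay (CAD) instruction, encoded via .insn.
 * As used here it effectively lets the CPU back off for a short while
 * as long as the lock word still contains the expected (contended)
 * value. Only called when MACHINE_HAS_CAD is set.
 */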
static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}

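/*
 * A CPU is considered preempted when it is neither sitting in enabled
 * wait (idle) nor currently backed by a running virtual CPU, i.e. the
 * hypervisor has taken the physical CPU away from it.
 */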
static inline int cpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return 0;
	if (smp_vcpu_scheduled(cpu))
		return 0;
	return 1;
}

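/*
 * Slow path of arch_spin_lock(): the lock word holds the lock value of
 * the owning CPU (SPINLOCK_LOCKVAL), and ~owner converts it back to a
 * CPU number for the preemption check and the directed yield.
 */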
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

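/*
 * Same wait loop as arch_spin_lock_wait(), but for callers that spin
 * with interrupts disabled: the saved flags are restored while busy
 * waiting and interrupts are disabled again before taking the lock.
 */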
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running. */
		if (first_diag && cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

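/*
 * Bounded trylock: make up to spin_retry acquisition attempts without
 * ever yielding; returns 1 on success, 0 if the lock stayed contended.
 */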
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	for (count = spin_retry; count > 0; count--) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return 1;
		} else if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&lp->lock, owner);
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

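/*
 * rwlock layout: bit 31 is the writer bit, the lower 31 bits count the
 * readers. Reader slow path; with CONFIG_HAVE_MARCH_Z196_FEATURES the
 * add of -1 at function entry appears to back out the reader count
 * taken optimistically by the interlocked-add fast path.
 */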
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

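/*
 * Bounded read trylock: attempt to increment the reader count up to
 * spin_retry times, pausing (CAD) while a writer holds the lock.
 */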
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

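/*
 * Writer slow path. A writer claims the lock by setting bit 31 and then
 * waits until all readers have drained. With z196 interlocked-access
 * instructions the bit is set with an atomic OR (__RAW_LOCK), otherwise
 * with compare-and-swap; 'prev' remembers whether the writer bit was
 * still free at the moment we set it.
 */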
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if ((int) old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

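/*
 * Bounded write trylock: succeeds only if the lock word is completely
 * free (no readers, no writer) within spin_retry attempts.
 */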
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

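/*
 * Relax helper for contended locks: directed yield to the CPU encoded
 * in the lock value, skipped on LPAR when the owner is known to be
 * running.
 */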
void arch_lock_relax(unsigned int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);