/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h>	/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL "ll "
#define __SC "sc "
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL "lld "
#define __SC "scd "
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupts for kernel-mode code, so that user-mode code
 * that dares to include kernel headers still builds.
 */

#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # set_bit \n"
		" or %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqzl %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # set_bit \n"
		" or %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqz %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}
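
/*
 * Usage sketch (editorial addition, not part of the original header;
 * the "pending" bitmap below is hypothetical).  Bitmaps are arrays of
 * unsigned long indexed as one flat bit field, so @nr may reach past
 * the first word:
 *
 *	static unsigned long pending[2];	 - 2 * _MIPS_SZLONG bits
 *
 *	set_bit(0, pending);			 - bit 0 of word 0
 *	set_bit(_MIPS_SZLONG + 3, pending);	 - bit 3 of word 1
 */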

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m |= 1UL << (nr & SZLONG_MASK);
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # clear_bit \n"
		" and %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqzl %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # clear_bit \n"
		" and %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqz %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}
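
/*
 * Barrier sketch (editorial addition; lock_word and LOCK_BIT are
 * hypothetical).  Because clear_bit() implies no barrier, a
 * release-style use must order prior stores explicitly:
 *
 *	...update the protected data...
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &lock_word);	 - drop the flag
 */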

/*
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m &= ~(1UL << (nr & SZLONG_MASK));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # change_bit \n"
		" xor %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqzl %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # change_bit \n"
		" xor %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqz %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m ^= 1UL << (nr & SZLONG_MASK);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_set_bit \n"
		" or %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set push \n"
		" .set noreorder \n"
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_set_bit \n"
		" or %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqz %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set pop \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
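
/*
 * Trylock sketch (editorial addition; lock_word and LOCK_BIT are
 * hypothetical).  The implied barrier makes the old-value test safe
 * as an acquire operation:
 *
 *	while (test_and_set_bit(LOCK_BIT, &lock_word))
 *		;				 - old value was 1: spin
 *	...critical section...
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &lock_word);
 */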

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_clear_bit \n"
		" or %2, %0, %3 \n"
		" xor %2, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set push \n"
		" .set noreorder \n"
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_clear_bit \n"
		" or %2, %0, %3 \n"
		" xor %2, %3 \n"
		" " __SC "%2, %1 \n"
		" beqz %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set pop \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_change_bit \n"
		" xor %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set push \n"
		" .set noreorder \n"
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_change_bit \n"
		" xor %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqz %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set pop \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
}
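
/*
 * Editorial note: test_bit() is a plain load plus shift and mask.
 * For nr = 37 on a 64-bit kernel it reads addr[0] (37 >> 6 == 0) and
 * extracts bit 37; on a 32-bit kernel it reads addr[1] (37 >> 5 == 1)
 * and extracts bit 5 (37 & 31).
 */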

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		" .set push \n"
		" .set mips32 \n"
		" clz %0, %1 \n"
		" .set pop \n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	" .set push \n"
	" .set mips64 \n"
	" dclz %0, %1 \n"
	" .set pop \n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
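
/*
 * Worked example (editorial): clz counts leading zeroes, so for
 * x = 0x80 a 32-bit clz yields 24 and __ilog2() returns 31 - 24 = 7.
 * For x = 0 clz yields 32, giving the documented -1.
 */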

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	return __ilog2(word & -word);
#else
	int b = 0, s;

#ifdef CONFIG_32BIT
	s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 31 != 0) s = 0; b += s;

	return b;
#endif
#ifdef CONFIG_64BIT
	s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
	s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 56 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 60 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 62 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 63 != 0) s = 0; b += s;

	return b;
#endif
#endif
}
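
/*
 * Worked example (editorial): in two's complement, word & -word
 * isolates the lowest set bit, so for word = 0x28 (101000 binary) the
 * MIPS32/MIPS64 path computes __ilog2(0x8) = 3.
 */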

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long ffs(unsigned long word)
{
	if (!word)
		return 0;

	return __ffs(word) + 1;
}

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffs(~word);
}
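
/*
 * Worked examples (editorial): ffs(0x18) = __ffs(0x18) + 1 = 4, and
 * ffz(0x7) = __ffs(~0x7) = 3, the first zero bit.  Callers must handle
 * the word == 0 and word == ~0UL corner cases as documented above.
 */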

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long fls(unsigned long word)
{
#ifdef CONFIG_32BIT
#ifdef CONFIG_CPU_MIPS32
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
#else
	{
	int r = 32, s;

	if (word == 0)
		return 0;

	s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
	s =  8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
	s =  4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
	s =  2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s;
	s =  1; if ((word & 0x80000000)) s = 0; r -= s;

	return r;
	}
#endif
#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT
#ifdef CONFIG_CPU_MIPS64
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
#else
	{
	int r = 64, s;

	if (word == 0)
		return 0;

	s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
	s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
	s =  8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
	s =  4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s;
	s =  2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s;
	s =  1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;

	return r;
	}
#endif
#endif /* CONFIG_64BIT */
}
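
/*
 * Worked example (editorial): fls(0x18) = 5, since bit 4 is the most
 * significant set bit and fls() numbers bits from 1.  On a MIPS32 CPU
 * this is 32 - clz(0x18) = 32 - 27 = 5.
 */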

#define fls64(x)   generic_fls64(x)

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 */
static inline unsigned long find_next_zero_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG-offset);
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
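
/*
 * Usage sketch (editorial addition; bitmap, nbits and slot are
 * hypothetical).  A simple allocator scans for a free slot and then
 * claims it atomically, rescanning if it loses the race:
 *
 *	do {
 *		slot = find_first_zero_bit(bitmap, nbits);
 *		if (slot >= nbits)
 *			return -1;		 - bitmap full
 *	} while (test_and_set_bit(slot, bitmap));
 */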

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (_MIPS_SZLONG - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}
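
/*
 * Usage sketch (editorial addition; bitmap, nbits and bit are
 * hypothetical).  The typical caller walks every set bit:
 *
 *	for (bit = find_first_bit(bitmap, nbits);
 *	     bit < nbits;
 *	     bit = find_next_bit(bitmap, nbits, bit + 1))
 *		...handle bit...
 */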

/*
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_32BIT
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
#endif
#ifdef CONFIG_64BIT
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
#endif
}
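
/*
 * Editorial note: the 140 bits are the O(1) scheduler's priority
 * bitmap (MAX_PRIO == 140), held in five 32-bit or three 64-bit words.
 * The unlikely() hints encode that the low-numbered (high-priority)
 * bits are rarely set, so the common case falls through toward the
 * last words.
 */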

/*
 * hweightN - returns the Hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming weight of a number is the total number of bits set in it.
 */

#define hweight64(x)	generic_hweight64(x)
#define hweight32(x)	generic_hweight32(x)
#define hweight16(x)	generic_hweight16(x)
#define hweight8(x)	generic_hweight8(x)
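
/*
 * Worked example (editorial): hweight8(0xb5) = 5, since 0xb5 =
 * 10110101 binary has five bits set.  This header defers to the
 * generic_hweight*() helpers, which compute the count with shift and
 * mask arithmetic rather than a dedicated instruction.
 */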

static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *) addr;
	int mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;

	return retval;
}

static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *) addr;
	int mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;

	return retval;
}

static inline int test_le_bit(unsigned long nr, const unsigned long *addr)
{
	const unsigned char *ADDR = (const unsigned char *) addr;
	int mask;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);

	return ((mask & *ADDR) != 0);
}

static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = cpu_to_lelongp(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG-offset);	/* bug or feature ? */
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = cpu_to_lelongp(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = cpu_to_lelongp(p);

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */

found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit(nr,addr) \
	__test_and_set_le_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_le_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock, nr, addr)		\
({							\
	int ret;					\
	spin_lock(lock);				\
	ret = ext2_set_bit((nr), (addr));		\
	spin_unlock(lock);				\
	ret;						\
})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
({							\
	int ret;					\
	spin_lock(lock);				\
	ret = ext2_clear_bit((nr), (addr));		\
	spin_unlock(lock);				\
	ret;						\
})
#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_le_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_le_bit((unsigned long*)addr, size, off)

/*
 * Bitmap functions for the minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This severely limits the Minix filesystem's value for data exchange.
 */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */