/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll	"
#define __SC	"sc	"
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld	"
#define __SC	"scd	"
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupts in kernel-mode builds, so that user-mode code
 * that dares to include kernel headers keeps working (without atomicity).
 */
48 | ||
49 | #define __bi_flags unsigned long flags | |
50 | #define __bi_local_irq_save(x) local_irq_save(x) | |
51 | #define __bi_local_irq_restore(x) local_irq_restore(x) | |
52 | #else | |
53 | #define __bi_flags | |
54 | #define __bi_local_irq_save(x) | |
55 | #define __bi_local_irq_restore(x) | |
56 | #endif /* __KERNEL__ */ | |
57 | ||
58 | /* | |
59 | * set_bit - Atomically set a bit in memory | |
60 | * @nr: the bit to set | |
61 | * @addr: the address to start counting from | |
62 | * | |
63 | * This function is atomic and may not be reordered. See __set_bit() | |
64 | * if you do not require the atomic guarantees. | |
65 | * Note that @nr may be almost arbitrarily large; this function is not | |
66 | * restricted to acting on a single-word quantity. | |
67 | */ | |
68 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |
69 | { | |
70 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
71 | unsigned long temp; | |
72 | ||
73 | if (cpu_has_llsc && R10000_LLSC_WAR) { | |
74 | __asm__ __volatile__( | |
c4559f67 | 75 | " .set mips3 \n" |
1da177e4 LT |
76 | "1: " __LL "%0, %1 # set_bit \n" |
77 | " or %0, %2 \n" | |
aac8aa77 | 78 | " " __SC "%0, %1 \n" |
1da177e4 | 79 | " beqzl %0, 1b \n" |
aac8aa77 | 80 | " .set mips0 \n" |
1da177e4 LT |
81 | : "=&r" (temp), "=m" (*m) |
82 | : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m)); | |
83 | } else if (cpu_has_llsc) { | |
84 | __asm__ __volatile__( | |
c4559f67 | 85 | " .set mips3 \n" |
1da177e4 LT |
86 | "1: " __LL "%0, %1 # set_bit \n" |
87 | " or %0, %2 \n" | |
aac8aa77 | 88 | " " __SC "%0, %1 \n" |
1da177e4 | 89 | " beqz %0, 1b \n" |
aac8aa77 | 90 | " .set mips0 \n" |
1da177e4 LT |
91 | : "=&r" (temp), "=m" (*m) |
92 | : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m)); | |
93 | } else { | |
94 | volatile unsigned long *a = addr; | |
95 | unsigned long mask; | |
96 | __bi_flags; | |
97 | ||
98 | a += nr >> SZLONG_LOG; | |
99 | mask = 1UL << (nr & SZLONG_MASK); | |
100 | __bi_local_irq_save(flags); | |
101 | *a |= mask; | |
102 | __bi_local_irq_restore(flags); | |
103 | } | |
104 | } | |
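
/*
 * Usage sketch (illustrative, not part of the original header): set_bit()
 * is the right primitive when another CPU or an interrupt handler may
 * update the same word concurrently; the ll/sc loop above retries until
 * the read-modify-write lands atomically.  The flag and word below are
 * hypothetical:
 *
 *	#define MYDEV_TX_BUSY	0
 *	static unsigned long mydev_flags;
 *
 *	set_bit(MYDEV_TX_BUSY, &mydev_flags);
 *
 * Unlike __set_bit(), no extra locking is needed for the update itself.
 */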
105 | ||
106 | /* | |
107 | * __set_bit - Set a bit in memory | |
108 | * @nr: the bit to set | |
109 | * @addr: the address to start counting from | |
110 | * | |
111 | * Unlike set_bit(), this function is non-atomic and may be reordered. | |
112 | * If it's called on the same region of memory simultaneously, the effect | |
113 | * may be that only one operation succeeds. | |
114 | */ | |
115 | static inline void __set_bit(unsigned long nr, volatile unsigned long * addr) | |
116 | { | |
117 | unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
118 | ||
119 | *m |= 1UL << (nr & SZLONG_MASK); | |
120 | } | |
121 | ||
122 | /* | |
123 | * clear_bit - Clears a bit in memory | |
124 | * @nr: Bit to clear | |
125 | * @addr: Address to start counting from | |
126 | * | |
127 | * clear_bit() is atomic and may not be reordered. However, it does | |
128 | * not contain a memory barrier, so if it is used for locking purposes, | |
129 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | |
130 | * in order to ensure changes are visible on other processors. | |
131 | */ | |
132 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |
133 | { | |
134 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
135 | unsigned long temp; | |
136 | ||
137 | if (cpu_has_llsc && R10000_LLSC_WAR) { | |
138 | __asm__ __volatile__( | |
c4559f67 | 139 | " .set mips3 \n" |
1da177e4 LT |
140 | "1: " __LL "%0, %1 # clear_bit \n" |
141 | " and %0, %2 \n" | |
142 | " " __SC "%0, %1 \n" | |
143 | " beqzl %0, 1b \n" | |
aac8aa77 | 144 | " .set mips0 \n" |
1da177e4 LT |
145 | : "=&r" (temp), "=m" (*m) |
146 | : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m)); | |
147 | } else if (cpu_has_llsc) { | |
148 | __asm__ __volatile__( | |
c4559f67 | 149 | " .set mips3 \n" |
1da177e4 LT |
150 | "1: " __LL "%0, %1 # clear_bit \n" |
151 | " and %0, %2 \n" | |
152 | " " __SC "%0, %1 \n" | |
153 | " beqz %0, 1b \n" | |
aac8aa77 | 154 | " .set mips0 \n" |
1da177e4 LT |
155 | : "=&r" (temp), "=m" (*m) |
156 | : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m)); | |
157 | } else { | |
158 | volatile unsigned long *a = addr; | |
159 | unsigned long mask; | |
160 | __bi_flags; | |
161 | ||
162 | a += nr >> SZLONG_LOG; | |
163 | mask = 1UL << (nr & SZLONG_MASK); | |
164 | __bi_local_irq_save(flags); | |
165 | *a &= ~mask; | |
166 | __bi_local_irq_restore(flags); | |
167 | } | |
168 | } | |
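
/*
 * Barrier sketch (illustrative, not part of the original header): because
 * clear_bit() implies no memory barrier, releasing a lock-like bit must
 * order earlier stores explicitly (names hypothetical):
 *
 *	mydev->state = MYDEV_IDLE;
 *	smp_mb__before_clear_bit();
 *	clear_bit(MYDEV_LOCK_BIT, &mydev_flags);
 *
 * Without the barrier another CPU could see the bit clear while still
 * observing the old value of mydev->state.
 */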
169 | ||
170 | /* | |
171 | * __clear_bit - Clears a bit in memory | |
172 | * @nr: Bit to clear | |
173 | * @addr: Address to start counting from | |
174 | * | |
175 | * Unlike clear_bit(), this function is non-atomic and may be reordered. | |
176 | * If it's called on the same region of memory simultaneously, the effect | |
177 | * may be that only one operation succeeds. | |
178 | */ | |
179 | static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr) | |
180 | { | |
181 | unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
182 | ||
183 | *m &= ~(1UL << (nr & SZLONG_MASK)); | |
184 | } | |
185 | ||
186 | /* | |
187 | * change_bit - Toggle a bit in memory | |
188 | * @nr: Bit to change | |
189 | * @addr: Address to start counting from | |
190 | * | |
191 | * change_bit() is atomic and may not be reordered. | |
192 | * Note that @nr may be almost arbitrarily large; this function is not | |
193 | * restricted to acting on a single-word quantity. | |
194 | */ | |
195 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |
196 | { | |
197 | if (cpu_has_llsc && R10000_LLSC_WAR) { | |
198 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
199 | unsigned long temp; | |
200 | ||
201 | __asm__ __volatile__( | |
c4559f67 | 202 | " .set mips3 \n" |
1da177e4 LT |
203 | "1: " __LL "%0, %1 # change_bit \n" |
204 | " xor %0, %2 \n" | |
aac8aa77 | 205 | " " __SC "%0, %1 \n" |
1da177e4 | 206 | " beqzl %0, 1b \n" |
aac8aa77 | 207 | " .set mips0 \n" |
1da177e4 LT |
208 | : "=&r" (temp), "=m" (*m) |
209 | : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m)); | |
210 | } else if (cpu_has_llsc) { | |
211 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
212 | unsigned long temp; | |
213 | ||
214 | __asm__ __volatile__( | |
c4559f67 | 215 | " .set mips3 \n" |
1da177e4 LT |
216 | "1: " __LL "%0, %1 # change_bit \n" |
217 | " xor %0, %2 \n" | |
aac8aa77 | 218 | " " __SC "%0, %1 \n" |
1da177e4 | 219 | " beqz %0, 1b \n" |
aac8aa77 | 220 | " .set mips0 \n" |
1da177e4 LT |
221 | : "=&r" (temp), "=m" (*m) |
222 | : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m)); | |
223 | } else { | |
224 | volatile unsigned long *a = addr; | |
225 | unsigned long mask; | |
226 | __bi_flags; | |
227 | ||
228 | a += nr >> SZLONG_LOG; | |
229 | mask = 1UL << (nr & SZLONG_MASK); | |
230 | __bi_local_irq_save(flags); | |
231 | *a ^= mask; | |
232 | __bi_local_irq_restore(flags); | |
233 | } | |
234 | } | |
235 | ||
236 | /* | |
237 | * __change_bit - Toggle a bit in memory | |
238 | * @nr: the bit to change | |
239 | * @addr: the address to start counting from | |
240 | * | |
241 | * Unlike change_bit(), this function is non-atomic and may be reordered. | |
242 | * If it's called on the same region of memory simultaneously, the effect | |
243 | * may be that only one operation succeeds. | |
244 | */ | |
245 | static inline void __change_bit(unsigned long nr, volatile unsigned long * addr) | |
246 | { | |
247 | unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
248 | ||
249 | *m ^= 1UL << (nr & SZLONG_MASK); | |
250 | } | |
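
/*
 * Sketch (illustrative, not part of the original header): the __-prefixed
 * variants above compile to plain load/store sequences, so they are only
 * safe when the word cannot change underneath us, e.g. under a spinlock
 * or on a bitmap not yet published to other CPUs (names hypothetical):
 *
 *	spin_lock(&map_lock);
 *	__set_bit(nr, map);
 *	__clear_bit(nr + 1, map);
 *	spin_unlock(&map_lock);
 */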
251 | ||
252 | /* | |
253 | * test_and_set_bit - Set a bit and return its old value | |
254 | * @nr: Bit to set | |
255 | * @addr: Address to count from | |
256 | * | |
257 | * This operation is atomic and cannot be reordered. | |
258 | * It also implies a memory barrier. | |
259 | */ | |
260 | static inline int test_and_set_bit(unsigned long nr, | |
261 | volatile unsigned long *addr) | |
262 | { | |
263 | if (cpu_has_llsc && R10000_LLSC_WAR) { | |
264 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
265 | unsigned long temp, res; | |
266 | ||
267 | __asm__ __volatile__( | |
c4559f67 | 268 | " .set mips3 \n" |
1da177e4 LT |
269 | "1: " __LL "%0, %1 # test_and_set_bit \n" |
270 | " or %2, %0, %3 \n" | |
271 | " " __SC "%2, %1 \n" | |
272 | " beqzl %2, 1b \n" | |
273 | " and %2, %0, %3 \n" | |
274 | #ifdef CONFIG_SMP | |
aac8aa77 | 275 | " sync \n" |
1da177e4 | 276 | #endif |
aac8aa77 | 277 | " .set mips0 \n" |
1da177e4 LT |
278 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
279 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | |
280 | : "memory"); | |
281 | ||
282 | return res != 0; | |
283 | } else if (cpu_has_llsc) { | |
284 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
285 | unsigned long temp, res; | |
286 | ||
287 | __asm__ __volatile__( | |
aac8aa77 MR |
288 | " .set push \n" |
289 | " .set noreorder \n" | |
c4559f67 | 290 | " .set mips3 \n" |
aac8aa77 | 291 | "1: " __LL "%0, %1 # test_and_set_bit \n" |
1da177e4 LT |
292 | " or %2, %0, %3 \n" |
293 | " " __SC "%2, %1 \n" | |
294 | " beqz %2, 1b \n" | |
295 | " and %2, %0, %3 \n" | |
296 | #ifdef CONFIG_SMP | |
aac8aa77 | 297 | " sync \n" |
1da177e4 | 298 | #endif |
aac8aa77 | 299 | " .set pop \n" |
1da177e4 LT |
300 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
301 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | |
302 | : "memory"); | |
303 | ||
304 | return res != 0; | |
305 | } else { | |
306 | volatile unsigned long *a = addr; | |
307 | unsigned long mask; | |
308 | int retval; | |
309 | __bi_flags; | |
310 | ||
311 | a += nr >> SZLONG_LOG; | |
312 | mask = 1UL << (nr & SZLONG_MASK); | |
313 | __bi_local_irq_save(flags); | |
314 | retval = (mask & *a) != 0; | |
315 | *a |= mask; | |
316 | __bi_local_irq_restore(flags); | |
317 | ||
318 | return retval; | |
319 | } | |
320 | } | |
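
/*
 * Usage sketch (illustrative, not part of the original header): the
 * returned old value plus the implied barrier make test_and_set_bit()
 * usable as a simple try-lock (names hypothetical):
 *
 *	if (test_and_set_bit(MYDEV_LOCK_BIT, &mydev_flags))
 *		return -EBUSY;
 *	... critical section ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(MYDEV_LOCK_BIT, &mydev_flags);
 */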
321 | ||
322 | /* | |
323 | * __test_and_set_bit - Set a bit and return its old value | |
324 | * @nr: Bit to set | |
325 | * @addr: Address to count from | |
326 | * | |
327 | * This operation is non-atomic and can be reordered. | |
328 | * If two examples of this operation race, one can appear to succeed | |
329 | * but actually fail. You must protect multiple accesses with a lock. | |
330 | */ | |
331 | static inline int __test_and_set_bit(unsigned long nr, | |
332 | volatile unsigned long *addr) | |
333 | { | |
334 | volatile unsigned long *a = addr; | |
335 | unsigned long mask; | |
336 | int retval; | |
337 | ||
338 | a += nr >> SZLONG_LOG; | |
339 | mask = 1UL << (nr & SZLONG_MASK); | |
340 | retval = (mask & *a) != 0; | |
341 | *a |= mask; | |
342 | ||
343 | return retval; | |
344 | } | |
345 | ||
346 | /* | |
347 | * test_and_clear_bit - Clear a bit and return its old value | |
348 | * @nr: Bit to clear | |
349 | * @addr: Address to count from | |
350 | * | |
351 | * This operation is atomic and cannot be reordered. | |
352 | * It also implies a memory barrier. | |
353 | */ | |
354 | static inline int test_and_clear_bit(unsigned long nr, | |
355 | volatile unsigned long *addr) | |
356 | { | |
357 | if (cpu_has_llsc && R10000_LLSC_WAR) { | |
358 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
359 | unsigned long temp, res; | |
360 | ||
361 | __asm__ __volatile__( | |
c4559f67 | 362 | " .set mips3 \n" |
1da177e4 LT |
363 | "1: " __LL "%0, %1 # test_and_clear_bit \n" |
364 | " or %2, %0, %3 \n" | |
365 | " xor %2, %3 \n" | |
aac8aa77 | 366 | " " __SC "%2, %1 \n" |
1da177e4 LT |
367 | " beqzl %2, 1b \n" |
368 | " and %2, %0, %3 \n" | |
369 | #ifdef CONFIG_SMP | |
370 | " sync \n" | |
371 | #endif | |
aac8aa77 | 372 | " .set mips0 \n" |
1da177e4 LT |
373 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
374 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | |
375 | : "memory"); | |
376 | ||
377 | return res != 0; | |
378 | } else if (cpu_has_llsc) { | |
379 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
380 | unsigned long temp, res; | |
381 | ||
382 | __asm__ __volatile__( | |
aac8aa77 MR |
383 | " .set push \n" |
384 | " .set noreorder \n" | |
c4559f67 | 385 | " .set mips3 \n" |
aac8aa77 | 386 | "1: " __LL "%0, %1 # test_and_clear_bit \n" |
1da177e4 LT |
387 | " or %2, %0, %3 \n" |
388 | " xor %2, %3 \n" | |
aac8aa77 | 389 | " " __SC "%2, %1 \n" |
1da177e4 LT |
390 | " beqz %2, 1b \n" |
391 | " and %2, %0, %3 \n" | |
392 | #ifdef CONFIG_SMP | |
393 | " sync \n" | |
394 | #endif | |
aac8aa77 | 395 | " .set pop \n" |
1da177e4 LT |
396 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
397 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | |
398 | : "memory"); | |
399 | ||
400 | return res != 0; | |
401 | } else { | |
402 | volatile unsigned long *a = addr; | |
403 | unsigned long mask; | |
404 | int retval; | |
405 | __bi_flags; | |
406 | ||
407 | a += nr >> SZLONG_LOG; | |
408 | mask = 1UL << (nr & SZLONG_MASK); | |
409 | __bi_local_irq_save(flags); | |
410 | retval = (mask & *a) != 0; | |
411 | *a &= ~mask; | |
412 | __bi_local_irq_restore(flags); | |
413 | ||
414 | return retval; | |
415 | } | |
416 | } | |
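
/*
 * Usage sketch (illustrative, not part of the original header): atomically
 * consuming a "work pending" flag so that exactly one context processes it
 * (names hypothetical):
 *
 *	if (test_and_clear_bit(MYDEV_WORK_PENDING, &mydev_flags))
 *		mydev_do_work();
 */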
417 | ||
418 | /* | |
419 | * __test_and_clear_bit - Clear a bit and return its old value | |
420 | * @nr: Bit to clear | |
421 | * @addr: Address to count from | |
422 | * | |
423 | * This operation is non-atomic and can be reordered. | |
424 | * If two examples of this operation race, one can appear to succeed | |
425 | * but actually fail. You must protect multiple accesses with a lock. | |
426 | */ | |
427 | static inline int __test_and_clear_bit(unsigned long nr, | |
428 | volatile unsigned long * addr) | |
429 | { | |
430 | volatile unsigned long *a = addr; | |
431 | unsigned long mask; | |
432 | int retval; | |
433 | ||
434 | a += (nr >> SZLONG_LOG); | |
435 | mask = 1UL << (nr & SZLONG_MASK); | |
436 | retval = ((mask & *a) != 0); | |
437 | *a &= ~mask; | |
438 | ||
439 | return retval; | |
440 | } | |
441 | ||
442 | /* | |
443 | * test_and_change_bit - Change a bit and return its old value | |
444 | * @nr: Bit to change | |
445 | * @addr: Address to count from | |
446 | * | |
447 | * This operation is atomic and cannot be reordered. | |
448 | * It also implies a memory barrier. | |
449 | */ | |
450 | static inline int test_and_change_bit(unsigned long nr, | |
451 | volatile unsigned long *addr) | |
452 | { | |
453 | if (cpu_has_llsc && R10000_LLSC_WAR) { | |
454 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
455 | unsigned long temp, res; | |
456 | ||
457 | __asm__ __volatile__( | |
c4559f67 | 458 | " .set mips3 \n" |
aac8aa77 | 459 | "1: " __LL "%0, %1 # test_and_change_bit \n" |
1da177e4 | 460 | " xor %2, %0, %3 \n" |
aac8aa77 | 461 | " " __SC "%2, %1 \n" |
1da177e4 LT |
462 | " beqzl %2, 1b \n" |
463 | " and %2, %0, %3 \n" | |
464 | #ifdef CONFIG_SMP | |
465 | " sync \n" | |
466 | #endif | |
aac8aa77 | 467 | " .set mips0 \n" |
1da177e4 LT |
468 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
469 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | |
470 | : "memory"); | |
471 | ||
472 | return res != 0; | |
473 | } else if (cpu_has_llsc) { | |
474 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | |
475 | unsigned long temp, res; | |
476 | ||
477 | __asm__ __volatile__( | |
aac8aa77 MR |
478 | " .set push \n" |
479 | " .set noreorder \n" | |
c4559f67 | 480 | " .set mips3 \n" |
aac8aa77 | 481 | "1: " __LL "%0, %1 # test_and_change_bit \n" |
1da177e4 | 482 | " xor %2, %0, %3 \n" |
aac8aa77 | 483 | " " __SC "\t%2, %1 \n" |
1da177e4 LT |
484 | " beqz %2, 1b \n" |
485 | " and %2, %0, %3 \n" | |
486 | #ifdef CONFIG_SMP | |
487 | " sync \n" | |
488 | #endif | |
aac8aa77 | 489 | " .set pop \n" |
1da177e4 LT |
490 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
491 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | |
492 | : "memory"); | |
493 | ||
494 | return res != 0; | |
495 | } else { | |
496 | volatile unsigned long *a = addr; | |
497 | unsigned long mask, retval; | |
498 | __bi_flags; | |
499 | ||
500 | a += nr >> SZLONG_LOG; | |
501 | mask = 1UL << (nr & SZLONG_MASK); | |
502 | __bi_local_irq_save(flags); | |
503 | retval = (mask & *a) != 0; | |
504 | *a ^= mask; | |
505 | __bi_local_irq_restore(flags); | |
506 | ||
507 | return retval; | |
508 | } | |
509 | } | |
510 | ||
511 | /* | |
512 | * __test_and_change_bit - Change a bit and return its old value | |
513 | * @nr: Bit to change | |
514 | * @addr: Address to count from | |
515 | * | |
516 | * This operation is non-atomic and can be reordered. | |
517 | * If two examples of this operation race, one can appear to succeed | |
518 | * but actually fail. You must protect multiple accesses with a lock. | |
519 | */ | |
520 | static inline int __test_and_change_bit(unsigned long nr, | |
521 | volatile unsigned long *addr) | |
522 | { | |
523 | volatile unsigned long *a = addr; | |
524 | unsigned long mask; | |
525 | int retval; | |
526 | ||
527 | a += (nr >> SZLONG_LOG); | |
528 | mask = 1UL << (nr & SZLONG_MASK); | |
529 | retval = ((mask & *a) != 0); | |
530 | *a ^= mask; | |
531 | ||
532 | return retval; | |
533 | } | |
534 | ||
535 | #undef __bi_flags | |
536 | #undef __bi_local_irq_save | |
537 | #undef __bi_local_irq_restore | |
538 | ||
539 | /* | |
540 | * test_bit - Determine whether a bit is set | |
541 | * @nr: bit number to test | |
542 | * @addr: Address to start counting from | |
543 | */ | |
544 | static inline int test_bit(unsigned long nr, const volatile unsigned long *addr) | |
545 | { | |
546 | return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK)); | |
547 | } | |
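
/*
 * Sketch (illustrative, not part of the original header): test_bit() is a
 * plain load, so a read-then-write pair is racy; claiming a bit atomically
 * needs test_and_set_bit() instead:
 *
 *	if (!test_bit(nr, map))
 *		set_bit(nr, map);	(racy: two CPUs may both see 0)
 *
 *	if (!test_and_set_bit(nr, map))
 *		... this context alone claimed bit nr ...
 */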
548 | ||
549 | /* | |
550 | * ffz - find first zero in word. | |
551 | * @word: The word to search | |
552 | * | |
553 | * Undefined if no zero exists, so code should check against ~0UL first. | |
554 | */ | |
555 | static inline unsigned long ffz(unsigned long word) | |
556 | { | |
557 | int b = 0, s; | |
558 | ||
559 | word = ~word; | |
875d43e7 | 560 | #ifdef CONFIG_32BIT |
1da177e4 LT |
561 | s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s; |
562 | s = 8; if (word << 24 != 0) s = 0; b += s; word >>= s; | |
563 | s = 4; if (word << 28 != 0) s = 0; b += s; word >>= s; | |
564 | s = 2; if (word << 30 != 0) s = 0; b += s; word >>= s; | |
565 | s = 1; if (word << 31 != 0) s = 0; b += s; | |
566 | #endif | |
875d43e7 | 567 | #ifdef CONFIG_64BIT |
1da177e4 LT |
568 | s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s; |
569 | s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s; | |
570 | s = 8; if (word << 56 != 0) s = 0; b += s; word >>= s; | |
571 | s = 4; if (word << 60 != 0) s = 0; b += s; word >>= s; | |
572 | s = 2; if (word << 62 != 0) s = 0; b += s; word >>= s; | |
573 | s = 1; if (word << 63 != 0) s = 0; b += s; | |
574 | #endif | |
575 | ||
576 | return b; | |
577 | } | |
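
/*
 * Worked example (illustrative): ffz(0xff) on a 32-bit kernel.  After
 * complementing, word = 0xffffff00.  The 16-bit probe sees set bits in
 * the low half (s = 0), the 8-bit probe sees none in the low byte
 * (s = 8, b = 8, word >>= 8), and the remaining probes all hit set bits,
 * so the result is 8: bit 8 is the first zero of 0xff.  Each step halves
 * the search window, giving a branch-light O(log2 BITS_PER_LONG) lookup.
 */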
578 | ||
579 | /* | |
580 | * __ffs - find first bit in word. | |
581 | * @word: The word to search | |
582 | * | |
583 | * Undefined if no bit exists, so code should check against 0 first. | |
584 | */ | |
585 | static inline unsigned long __ffs(unsigned long word) | |
586 | { | |
587 | return ffz(~word); | |
588 | } | |
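
/*
 * E.g. __ffs(0x18) == 3 (illustrative): complementing the word turns its
 * lowest set bit into the lowest clear bit, which ffz() then locates.
 */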
589 | ||
590 | /* | |
591 | * fls: find last bit set. | |
592 | */ | |
593 | ||
594 | #define fls(x) generic_fls(x) | |
595 | ||
/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_zero_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG - offset);
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (_MIPS_SZLONG - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/*
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

#ifdef __KERNEL__

/*
 * Every architecture must define this function.  It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set.  It's guaranteed that at least one of the 140
 * bits is set, so the final __ffs() in each variant is well defined.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_32BIT
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
#endif
#ifdef CONFIG_64BIT
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
#endif
}
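
/*
 * Note (illustrative): 140 bits round up to five 32-bit words or three
 * 64-bit words, which is exactly how far the two variants above unroll;
 * the unconditional __ffs() on the last word relies on the guarantee
 * that some bit in the map is set.
 */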
729 | ||
730 | /* | |
731 | * ffs - find first bit set | |
732 | * @x: the word to search | |
733 | * | |
734 | * This is defined the same way as | |
735 | * the libc and compiler builtin ffs routines, therefore | |
736 | * differs in spirit from the above ffz (man ffs). | |
737 | */ | |
738 | ||
739 | #define ffs(x) generic_ffs(x) | |
740 | ||
741 | /* | |
742 | * hweightN - returns the hamming weight of a N-bit word | |
743 | * @x: the word to weigh | |
744 | * | |
745 | * The Hamming Weight of a number is the total number of bits set in it. | |
746 | */ | |
747 | ||
748 | #define hweight64(x) generic_hweight64(x) | |
749 | #define hweight32(x) generic_hweight32(x) | |
750 | #define hweight16(x) generic_hweight16(x) | |
751 | #define hweight8(x) generic_hweight8(x) | |
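
/*
 * E.g. hweight32(0xf0f0) == 8 (illustrative): eight bits are set.  The
 * generic_hweight* helpers count bits with parallel masking steps rather
 * than a loop over all bit positions.
 */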
752 | ||
753 | static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr) | |
754 | { | |
755 | unsigned char *ADDR = (unsigned char *) addr; | |
756 | int mask, retval; | |
757 | ||
758 | ADDR += nr >> 3; | |
759 | mask = 1 << (nr & 0x07); | |
760 | retval = (mask & *ADDR) != 0; | |
761 | *ADDR |= mask; | |
762 | ||
763 | return retval; | |
764 | } | |
765 | ||
766 | static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr) | |
767 | { | |
768 | unsigned char *ADDR = (unsigned char *) addr; | |
769 | int mask, retval; | |
770 | ||
771 | ADDR += nr >> 3; | |
772 | mask = 1 << (nr & 0x07); | |
773 | retval = (mask & *ADDR) != 0; | |
774 | *ADDR &= ~mask; | |
775 | ||
776 | return retval; | |
777 | } | |
778 | ||
779 | static inline int test_le_bit(unsigned long nr, const unsigned long * addr) | |
780 | { | |
781 | const unsigned char *ADDR = (const unsigned char *) addr; | |
782 | int mask; | |
783 | ||
784 | ADDR += nr >> 3; | |
785 | mask = 1 << (nr & 0x07); | |
786 | ||
787 | return ((mask & *ADDR) != 0); | |
788 | } | |
789 | ||
790 | static inline unsigned long find_next_zero_le_bit(unsigned long *addr, | |
791 | unsigned long size, unsigned long offset) | |
792 | { | |
793 | unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG); | |
794 | unsigned long result = offset & ~SZLONG_MASK; | |
795 | unsigned long tmp; | |
796 | ||
797 | if (offset >= size) | |
798 | return size; | |
799 | size -= result; | |
800 | offset &= SZLONG_MASK; | |
801 | if (offset) { | |
802 | tmp = cpu_to_lelongp(p++); | |
803 | tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */ | |
804 | if (size < _MIPS_SZLONG) | |
805 | goto found_first; | |
806 | if (~tmp) | |
807 | goto found_middle; | |
808 | size -= _MIPS_SZLONG; | |
809 | result += _MIPS_SZLONG; | |
810 | } | |
811 | while (size & ~SZLONG_MASK) { | |
812 | if (~(tmp = cpu_to_lelongp(p++))) | |
813 | goto found_middle; | |
814 | result += _MIPS_SZLONG; | |
815 | size -= _MIPS_SZLONG; | |
816 | } | |
817 | if (!size) | |
818 | return result; | |
819 | tmp = cpu_to_lelongp(p); | |
820 | ||
821 | found_first: | |
822 | tmp |= ~0UL << size; | |
823 | if (tmp == ~0UL) /* Are any bits zero? */ | |
824 | return result + size; /* Nope. */ | |
825 | ||
826 | found_middle: | |
827 | return result + ffz(tmp); | |
828 | } | |
829 | ||
830 | #define find_first_zero_le_bit(addr, size) \ | |
831 | find_next_zero_le_bit((addr), (size), 0) | |
832 | ||
833 | #define ext2_set_bit(nr,addr) \ | |
834 | __test_and_set_le_bit((nr),(unsigned long*)addr) | |
835 | #define ext2_clear_bit(nr, addr) \ | |
836 | __test_and_clear_le_bit((nr),(unsigned long*)addr) | |
837 | #define ext2_set_bit_atomic(lock, nr, addr) \ | |
838 | ({ \ | |
839 | int ret; \ | |
840 | spin_lock(lock); \ | |
841 | ret = ext2_set_bit((nr), (addr)); \ | |
842 | spin_unlock(lock); \ | |
843 | ret; \ | |
844 | }) | |
845 | ||
846 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | |
847 | ({ \ | |
848 | int ret; \ | |
849 | spin_lock(lock); \ | |
850 | ret = ext2_clear_bit((nr), (addr)); \ | |
851 | spin_unlock(lock); \ | |
852 | ret; \ | |
853 | }) | |
854 | #define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) | |
855 | #define ext2_find_first_zero_bit(addr, size) \ | |
856 | find_first_zero_le_bit((unsigned long*)addr, size) | |
857 | #define ext2_find_next_zero_bit(addr, size, off) \ | |
858 | find_next_zero_le_bit((unsigned long*)addr, size, off) | |
859 | ||

/*
 * Bitmap functions for the Minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bit order.
 * This severely limits the Minix filesystem's usefulness for data
 * exchange between systems of different endianness.
 */
#define minix_test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
#define minix_set_bit(nr, addr) set_bit(nr, addr)
#define minix_test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
#define minix_test_bit(nr, addr) test_bit(nr, addr)
#define minix_find_first_zero_bit(addr, size) find_first_zero_bit(addr, size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */