/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll	"
#define __SC	"sc	"
#define __INS	"ins	"
#define __EXT	"ext	"
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld	"
#define __SC	"scd	"
#define __INS	"dins	"
#define __EXT	"dext	"
#endif
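
/*
 * Illustrative note (not part of the original header): the __LL/__SC/
 * __INS/__EXT macros rely on C string-literal concatenation inside the
 * inline asm templates below.  On a 64-bit build, for example,
 *
 *	"1:	" __LL "%0, %1\n"
 *
 * is assembled as "1: lld %0, %1", so the doubleword load-linked form is
 * selected at compile time without duplicating every asm body.
 */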

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# set_bit	\n"
			"	" __INS "%0, %3, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	arch=r4000		\n"
			"	" __LL "%0, %1	# set_bit	\n"
			"	or	%0, %2			\n"
			"	" __SC	"%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
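
/*
 * Usage sketch (illustrative only; `pending_irqs` is a hypothetical
 * bitmap, not part of this header).  Because @nr may span multiple
 * words, a flat bitmap can be indexed directly:
 *
 *	static DECLARE_BITMAP(pending_irqs, 128);
 *
 *	void mark_irq_pending(unsigned int irq)
 *	{
 *		set_bit(irq, pending_irqs);	// atomic RMW, no barrier implied
 *	}
 */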

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000		\n"
		"1:	" __LL "%0, %1	# clear_bit	\n"
		"	and	%0, %2			\n"
		"	" __SC "%0, %1			\n"
		"	beqzl	%0, 1b			\n"
		"	.set	mips0			\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	arch=r4000		\n"
			"	" __LL "%0, %1	# clear_bit	\n"
			"	and	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000		\n"
		"1:	" __LL "%0, %1	# change_bit	\n"
		"	xor	%0, %2			\n"
		"	" __SC	"%0, %1			\n"
		"	beqzl	%0, 1b			\n"
		"	.set	mips0			\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000		\n"
			"	" __LL "%0, %1	# change_bit	\n"
			"	xor	%0, %2			\n"
			"	" __SC	"%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}
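
/*
 * Usage sketch (illustrative; `led_state` is a hypothetical bitmap, not
 * part of this header): toggling a status bit from concurrent contexts
 * without a lock.
 *
 *	static DECLARE_BITMAP(led_state, 8);
 *
 *	void blink_led(int led)
 *	{
 *		change_bit(led, led_state);	// atomic XOR of one bit
 *	}
 */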

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC	"%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
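
/*
 * Usage sketch (illustrative; `resource_map` is a hypothetical bitmap):
 * the returned old value makes claim/release idioms race-free, so only
 * one caller can win the claim of a given slot.
 *
 *	static DECLARE_BITMAP(resource_map, 64);
 *
 *	int claim_slot(int slot)
 *	{
 *		if (test_and_set_bit(slot, resource_map))
 *			return -EBUSY;	// bit was already set: someone owns it
 *		return 0;		// we atomically set it: slot is ours
 *	}
 */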

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# test_and_set_bit_lock	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC	"%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1	# test_and_set_bit_lock	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
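
/*
 * Pairing sketch (illustrative; `state_bits` and BUSY_BIT are
 * hypothetical): test_and_set_bit_lock() and clear_bit_unlock() form an
 * acquire/release pair, so a single bit can serve as a tiny
 * spinlock-like flag.
 *
 *	#define BUSY_BIT 0
 *	static DECLARE_BITMAP(state_bits, 1);
 *
 *	void enter(void)
 *	{
 *		while (test_and_set_bit_lock(BUSY_BIT, state_bits))
 *			cpu_relax();		// spin until we acquire the bit
 *	}
 *
 *	void leave(void)
 *	{
 *		clear_bit_unlock(BUSY_BIT, state_bits);	// release
 *	}
 */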

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC	"%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
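
/*
 * Usage sketch (illustrative; `pending_irqs` is the hypothetical bitmap
 * from the set_bit() sketch above, and handle_irq() a hypothetical
 * helper): atomically consume a pending flag so each event is handled
 * exactly once.
 *
 *	void poll_irq(unsigned int irq)
 *	{
 *		if (test_and_clear_bit(irq, pending_irqs))
 *			handle_irq(irq);	// we observed and cleared the bit
 *	}
 */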

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC	"%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL	"%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation.  It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

/*
 * Return the bit position (0..BITS_PER_LONG - 1) of the most significant
 * 1 bit in a word.
 * Returns -1 if no 1 bit exists.
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips64					\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
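
/*
 * Worked example (illustrative): for word = 0x90 (binary 1001 0000) the
 * most significant set bit is bit 7, so __fls(0x90) == 7.  On a mips32
 * CPU with clz this is computed as 31 - clz(0x90) = 31 - 24 = 7.
 */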

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
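
/*
 * Worked example (illustrative): `word & -word` isolates the lowest set
 * bit, because negating in two's complement flips every bit above it.
 * For word = 40 (binary 10 1000), word & -word = 8 (binary 1000), and
 * __fls(8) == 3, so __ffs(40) == 3, the index of the lowest 1 bit.
 */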

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
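
/*
 * Worked example (illustrative): ffs() is 1-based where __ffs() is
 * 0-based.  ffs(0) == 0, ffs(1) == 1, ffs(40) == 4 (the lowest set bit
 * of binary 10 1000 is bit 3, reported as position 4), and
 * ffs(0x80000000) == 32.
 */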

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */