include/linux/bitops.h
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

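/*
 * Usage sketch for for_each_bit() (illustrative, assuming a
 * caller-supplied bitmap): walk every set bit of `mask'; with the
 * value below, bits 1 and 3 are visited.  find_first_bit() and
 * find_next_bit() come from <asm/bitops.h>.
 *
 *        unsigned long mask = 0x0a;
 *        unsigned int bit;
 *
 *        for_each_bit(bit, &mask, BITS_PER_LONG)
 *                printk("bit %u is set\n", bit);
 */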
#define for_each_bit(bit, addr, size) \
        for ((bit) = find_first_bit((addr), (size)); \
             (bit) < (size); \
             (bit) = find_next_bit((addr), (size), (bit) + 1))


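/*
 * get_bitmask_order(count) returns fls(count): one more than the bit
 * index of the most significant set bit.  Illustrative values
 * (sketch):
 *
 *        get_bitmask_order(1)  == 1
 *        get_bitmask_order(5)  == 3      (5 == 101b)
 *        get_bitmask_order(16) == 5
 */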
static __inline__ int get_bitmask_order(unsigned int count)
{
        int order;

        order = fls(count);
        return order;   /* We could be slightly more clever with -1 here... */
}

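/*
 * get_count_order(count) is the base-2 logarithm of `count' rounded
 * up: the smallest order such that (1 << order) >= count.
 * Illustrative values (sketch):
 *
 *        get_count_order(1)  == 0
 *        get_count_order(16) == 4
 *        get_count_order(17) == 5
 */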
static __inline__ int get_count_order(unsigned int count)
{
        int order;

        order = fls(count) - 1;
        if (count & (count - 1))
                order++;
        return order;
}

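/*
 * hweight_long(w) is the Hamming weight (number of set bits) of an
 * unsigned long, dispatching to hweight32() or hweight64() by word
 * size.  Illustrative values (sketch):
 *
 *        hweight_long(0UL)      == 0
 *        hweight_long(0xf0f0UL) == 8
 */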
static inline unsigned long hweight_long(unsigned long w)
{
        return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

45f8bde0 40/**
1da177e4 41 * rol32 - rotate a 32-bit value left
1da177e4
LT
42 * @word: value to rotate
43 * @shift: bits to roll
44 */
45static inline __u32 rol32(__u32 word, unsigned int shift)
46{
47 return (word << shift) | (word >> (32 - shift));
48}
49
45f8bde0 50/**
1da177e4 51 * ror32 - rotate a 32-bit value right
1da177e4
LT
52 * @word: value to rotate
53 * @shift: bits to roll
54 */
55static inline __u32 ror32(__u32 word, unsigned int shift)
56{
57 return (word >> shift) | (word << (32 - shift));
58}
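/*
 * Illustrative values for the two rotate helpers above (sketch); the
 * shift is expected to be in the range 1..31, since a shift of 0 would
 * make the complementary 32-bit shift undefined in C:
 *
 *        rol32(0x80000001, 1) == 0x00000003
 *        ror32(0x80000001, 1) == 0xc0000000
 */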
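/*
 * fls_long(l) widens fls() to unsigned long: it returns one plus the
 * index of the most significant set bit, or 0 if no bits are set,
 * using fls64() on 64-bit architectures.  Illustrative values
 * (sketch):
 *
 *        fls_long(0UL)    == 0
 *        fls_long(0x10UL) == 5
 */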
static inline unsigned fls_long(unsigned long l)
{
        if (sizeof(l) == 4)
                return fls(l);
        return fls64(l);
}

#endif