#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef __KERNEL__
#define BIT(nr)			(1UL << (nr))
#define BIT_ULL(nr)		(1ULL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif

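/*
 * Editor's note: a minimal usage sketch, not part of the original header.
 * It shows how BIT_WORD() and BIT_MASK() cooperate to address one bit in a
 * multi-word bitmap.  The function and parameter names are hypothetical.
 */
static inline int example_bitmap_test(const unsigned long *bitmap, unsigned int nr)
{
	/* BIT_WORD() picks the long holding bit @nr, BIT_MASK() the bit within it. */
	return (bitmap[BIT_WORD(nr)] & BIT_MASK(nr)) != 0;
}
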
/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example
 * GENMASK_ULL(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
 */
#define GENMASK(h, l)		(((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
#define GENMASK_ULL(h, l)	(((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))

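/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * GENMASK() is typically used to select a bit field; the register layout
 * below (bits 11..4) is made up purely for the example.
 */
static inline unsigned int example_get_field(__u32 reg)
{
	/* Mask out bits 11..4, then shift the field down to bit 0. */
	return (reg & GENMASK(11, 4)) >> 4;
}
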
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
	     (bit) < (size); \
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

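/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * A typical pattern is to walk every set bit of a pending-work bitmap;
 * the names irq_pending and handle_one are hypothetical.
 */
static inline void example_handle_pending(const unsigned long *irq_pending,
					  unsigned int nbits,
					  void (*handle_one)(unsigned int bit))
{
	unsigned int bit;

	/* Visits only the set bits, in increasing bit order. */
	for_each_set_bit(bit, irq_pending, nbits)
		handle_one(bit);
}
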
static __inline__ int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

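/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * get_count_order() gives the power-of-two order that covers @count, so it
 * is handy for rounding an element count up to the next power of two.
 * The helper name is hypothetical.
 */
static inline unsigned long example_pow2_entries(unsigned int nentries)
{
	/* get_count_order(5) == 3, so 5 rounds up to 8; 8 stays 8. */
	return 1UL << get_count_order(nentries);
}
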
static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

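/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * hweight_long() is the population count of one long; a common use is
 * counting the set bits across a small bitmap.  The names are hypothetical.
 */
static inline unsigned long example_bits_set(const unsigned long *map, unsigned int nlongs)
{
	unsigned long total = 0;
	unsigned int i;

	for (i = 0; i < nlongs; i++)
		total += hweight_long(map[i]);	/* popcount of each word */
	return total;
}
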
/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << shift) | (word >> (64 - shift));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> shift) | (word << (64 - shift));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << shift) | (word >> (16 - shift));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> shift) | (word << (16 - shift));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << shift) | (word >> (8 - shift));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> shift) | (word << (8 - shift));
}

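/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * Rotates are often used as a mixing step in hash functions; the rotation
 * constants below are arbitrary and chosen only for the example (and kept
 * in 1..31, since the helpers above do not handle a zero shift specially).
 */
static inline __u32 example_mix(__u32 a, __u32 b)
{
	a ^= rol32(b, 13);	/* rotate-left keeps all 32 bits of @b */
	return ror32(a, 7);
}
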
/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}

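/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * A typical caller has extracted an N-bit two's-complement field from a
 * register and needs it as an ordinary signed value; the 12-bit layout
 * below is hypothetical.
 */
static inline int example_decode_reading(__u32 raw)
{
	/* Bits 11..0 hold a signed 12-bit reading; bit 11 is its sign bit. */
	return sign_extend32(raw & GENMASK(11, 0), 11);
}
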
static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}

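/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * __ffs64() gives the index of the lowest set bit, so it can pick the first
 * free slot out of a 64-bit allocation mask.  The names are hypothetical.
 */
static inline unsigned long example_first_free_slot(u64 used_mask)
{
	u64 free_mask = ~used_mask;

	/* __ffs64() is undefined for 0, so handle the all-used case first. */
	if (!free_mask)
		return 64;	/* no free slot */
	return __ffs64(free_mask);
}
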
#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits) \
({ \
	const typeof(*ptr) mask = (_mask), bits = (_bits); \
	typeof(*ptr) old, new; \
 \
	do { \
		old = ACCESS_ONCE(*ptr); \
		new = (old & ~mask) | bits; \
	} while (cmpxchg(ptr, old, new) != old); \
 \
	new; \
})
#endif

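/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * set_mask_bits() atomically replaces only the bits selected by a mask; a
 * common use is updating a multi-bit state field inside a flags word.  The
 * field layout (bits 3..0) is made up for the example.
 */
static inline unsigned long example_set_state(unsigned long *flags, unsigned long state)
{
	/* Swap in @state for the hypothetical state field in bits 3..0. */
	return set_mask_bits(flags, GENMASK(3, 0), state & GENMASK(3, 0));
}
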
#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or size.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif

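/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * find_last_bit() returns @size when no bit is set, so callers must treat
 * that value as "empty bitmap".  The names below are hypothetical.
 */
static inline unsigned long example_highest_used(const unsigned long *map,
						 unsigned long nbits)
{
	unsigned long last = find_last_bit(map, nbits);

	return (last == nbits) ? 0 : last + 1;	/* number of used entries */
}
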
#endif /* __KERNEL__ */
#endif