/* include/linux/bitops.h */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef __KERNEL__
#define BIT(nr)			(1UL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif

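/*
 * Editor's sketch, not part of the original header: BIT_WORD() selects the
 * array index and BIT_MASK() the in-word mask, so together they address one
 * bit of a multi-word bitmap.  The function name below is hypothetical.
 */
static inline void example_set_bitmap_bit(unsigned long *bitmap, unsigned int nr)
{
	bitmap[BIT_WORD(nr)] |= BIT_MASK(nr);	/* non-atomic set of bit nr */
}
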
/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))


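/*
 * Editor's usage sketch, not part of the original header; the names pending,
 * nbits and process() are hypothetical:
 *
 *	unsigned int bit;
 *	for_each_bit(bit, pending, nbits)
 *		process(bit);	// runs once per set bit, in ascending order
 */
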
static __inline__ int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

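/*
 * Editor's note, illustrative and not from the original header:
 * get_bitmask_order(count) is just fls(count), the 1-based position of the
 * highest set bit, while get_count_order(count) yields ceil(log2(count)):
 *
 *	get_bitmask_order(16) == 5,  get_count_order(16) == 4
 *	get_bitmask_order(17) == 5,  get_count_order(17) == 5
 */
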
static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

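/*
 * Editor's note, illustrative: hweightN() is the Hamming weight (population
 * count), e.g. hweight32(0xf0f0f0f0) == 16, so hweight_long() only has to
 * dispatch on sizeof(unsigned long).
 */
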
/*
 * Clearly slow versions of the hweightN() functions; their benefit is
 * of course compile time evaluation of constant arguments.
 */
#define HWEIGHT8(w)					\
      (	BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) +	\
	(!!((w) & (1ULL << 0))) +			\
	(!!((w) & (1ULL << 1))) +			\
	(!!((w) & (1ULL << 2))) +			\
	(!!((w) & (1ULL << 3))) +			\
	(!!((w) & (1ULL << 4))) +			\
	(!!((w) & (1ULL << 5))) +			\
	(!!((w) & (1ULL << 6))) +			\
	(!!((w) & (1ULL << 7)))	)

#define HWEIGHT16(w) (HWEIGHT8(w)  + HWEIGHT8((w) >> 8))
#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))

/*
 * Type invariant version that simply casts things to the
 * largest type.
 */
#define HWEIGHT(w)   HWEIGHT64((u64)(w))

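/*
 * Editor's usage sketch, not part of the original header: because HWEIGHT()
 * folds to an integer constant expression, it can size arrays at compile
 * time, while a non-constant argument trips BUILD_BUG_ON_ZERO().  The macro
 * and array names below are hypothetical.
 *
 *	#define MY_EVENT_MASK 0x0000f00fULL
 *	static struct event *slots[HWEIGHT(MY_EVENT_MASK)];	// 8 slots
 */
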
/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}

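/*
 * Editor's note, illustrative: bits shifted out of one end re-enter at the
 * other, e.g. rol32(0x80000001, 1) == 0x00000003.  A shift of 0 would make
 * the complementary shift (32 - 0) undefined in C, so shifts of 1..31 are
 * assumed here.
 */
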
/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << shift) | (word >> (16 - shift));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> shift) | (word << (16 - shift));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << shift) | (word >> (8 - shift));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> shift) | (word << (8 - shift));
}

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

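/*
 * Editor's note, illustrative: fls() is 1-based and returns 0 when no bits
 * are set, e.g. fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32; fls_long()
 * merely widens this to fls64() when unsigned long is 64 bits.
 */
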
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}

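/*
 * Editor's note, illustrative: unlike fls(), __ffs() is 0-based, so
 * __ffs64(0x18) == 3 and __ffs64(1ULL << 32) == 32; the latter takes the
 * high-word branch above on a 32-bit architecture.
 */
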
#ifdef __KERNEL__
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, or @size if no bits are set.
 */
extern unsigned long find_first_bit(const unsigned long *addr,
				    unsigned long size);

/**
 * find_first_zero_bit - find the first cleared bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first cleared bit, or @size if all bits
 * are set.
 */
extern unsigned long find_first_zero_bit(const unsigned long *addr,
					 unsigned long size);
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */

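/*
 * Editor's usage sketch, not part of the original header: a typical caller
 * scans for a free slot and treats a return equal to the size as "none
 * found".  The names inuse and MAX_SLOTS are hypothetical.
 *
 *	slot = find_first_zero_bit(inuse, MAX_SLOTS);
 *	if (slot == MAX_SLOTS)
 *		return -ENOSPC;	// every slot is taken
 */
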
#ifdef CONFIG_GENERIC_FIND_LAST_BIT
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or @size if no bits are set.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif /* CONFIG_GENERIC_FIND_LAST_BIT */

#ifdef CONFIG_GENERIC_FIND_NEXT_BIT

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 */
extern unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset);

/**
 * find_next_zero_bit - find the next cleared bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 */
extern unsigned long find_next_zero_bit(const unsigned long *addr,
					unsigned long size,
					unsigned long offset);

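/*
 * Editor's usage sketch, not part of the original header: the find_next_*
 * routines likewise return @size when nothing further is found, so a scan
 * can resume past a previous hit.  The names map, nbits and prev are
 * hypothetical.
 *
 *	next = find_next_zero_bit(map, nbits, prev + 1);
 *	if (next == nbits)
 *		break;	// no clear bit at or above prev + 1
 */
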
#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
#endif /* __KERNEL__ */
#endif