#ifndef _X86_64_BITOPS_H
#define _X86_64_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif
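
/*
 * Example: with CONFIG_SMP the atomic helpers below assemble with the bus
 * lock, e.g. set_bit() emits "lock ; btsl %1,%0"; on a UP build the plain
 * "btsl" suffices, since a single CPU cannot race against itself.
 */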

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"+m" (ADDR)
		:"dIr" (nr) : "memory");
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__ volatile(
		"btsl %1,%0"
		:"+m" (ADDR)
		:"dIr" (nr) : "memory");
}
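
/*
 * Example: __set_bit() is appropriate when the caller already serializes
 * access, e.g. while building a bitmap no other CPU can see yet
 * (map/shared_map below are illustrative names, not part of this header):
 *
 *	unsigned long map[2] = { 0, 0 };
 *	__set_bit(3, map);		// private map: non-atomic is fine
 *	set_bit(70, shared_map);	// shared map: needs the atomic form
 */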

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"+m" (ADDR)
		:"dIr" (nr));
}

static __inline__ void __clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"+m" (ADDR)
		:"dIr" (nr));
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
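
/*
 * Example: a lock-release style use of clear_bit(), per the kernel-doc
 * above.  Stores to the protected data must be ordered before the bit is
 * cleared, hence the barrier first (LOCK_BIT/flags are illustrative):
 *
 *	owner = NULL;			// publish protected state
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &flags);
 */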

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"+m" (ADDR)
		:"dIr" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"+m" (ADDR)
		:"dIr" (nr));
}
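
/*
 * Example: toggling the same bit twice restores its original value, even
 * if other CPUs atomically toggle different bits in between:
 *
 *	change_bit(7, &word);	// word ^= 0x80, atomically
 *	change_bit(7, &word);	// bit 7 back to its previous value
 */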

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}
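
/*
 * Example: test_and_set_bit() is the classic try-lock building block.
 * The sbbl turns the carry flag (the old bit value) into 0 or -1, so the
 * result is nonzero exactly when the bit was already set ('busy' is an
 * illustrative name):
 *
 *	if (!test_and_set_bit(0, &busy))
 *		... we own the resource ...
 *	else
 *		... someone else got there first ...
 */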

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile const void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"dIr" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
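
/*
 * Example: the test_bit() macro picks its implementation at compile time.
 * With a constant @nr the test is pure C that gcc can fold; with a runtime
 * @nr it becomes a btl/sbbl instruction pair (map/n are illustrative):
 *
 *	test_bit(5, map);	// constant nr: constant_test_bit()
 *	test_bit(n, map);	// variable nr: variable_test_bit()
 */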

#undef ADDR

extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
extern long find_next_zero_bit (const unsigned long * addr, long size, long offset);
extern long find_first_bit(const unsigned long * addr, unsigned long size);
extern long find_next_bit(const unsigned long * addr, long size, long offset);

/* return index of first bit set in val or max when no bit is set */
static inline unsigned long __scanbit(unsigned long val, unsigned long max)
{
	asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
	return val;
}
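
/*
 * Example: bsfq leaves its destination undefined and sets ZF when the
 * source is zero; the cmovz then substitutes @max, making the result
 * well defined for every input:
 *
 *	__scanbit(0x8, 64) == 3		// lowest set bit is bit 3
 *	__scanbit(0x0, 64) == 64	// no bit set: returns max
 */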

#define find_first_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(*(unsigned long *)addr,(size))) : \
  find_first_bit(addr,size)))

#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
  find_next_bit(addr,size,off)))

#define find_first_zero_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(~*(unsigned long *)addr,(size))) : \
  find_first_zero_bit(addr,size)))

#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
  find_next_zero_bit(addr,size,off)))
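
/*
 * Example: each macro above shadows the extern function of the same name.
 * For a constant size of at most one word the search inlines to a single
 * __scanbit(); otherwise the recursive use of the macro's own name is not
 * re-expanded by the preprocessor, so the out-of-line function is called:
 *
 *	find_first_zero_bit(map, 64);	// inlined: __scanbit(~*map, 64)
 *	find_first_zero_bit(map, n);	// calls the extern function
 */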

/*
 * Find string of zero bits in a bitmap.  -1 when not found.
 */
extern unsigned long
find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);

static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
				  int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__set_bit(i, bitmap);
		i++;
	}
}

static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
				      int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__clear_bit(i, bitmap);
		i++;
	}
}
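
/*
 * Example: a simple contiguous-range allocator over a bitmap, in the style
 * of a DMA aperture allocator (all names here are illustrative):
 *
 *	off = find_next_zero_string(map, 0, nbits, npages);
 *	if (off != -1UL)
 *		set_bit_string(map, off, npages);	// claim the range
 *	...
 *	__clear_bit_string(map, off, npages);		// free it again
 */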

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}

/**
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}

/**
 * __fls - find last set bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __fls(unsigned long word)
{
	__asm__("bsrq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
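
/*
 * Example: worked values for the word scanners above (64-bit words):
 *
 *	ffz(0x7)    == 3	// first zero is bit 3
 *	__ffs(0x18) == 3	// lowest set bit of 0b11000
 *	__fls(0x18) == 4	// highest set bit of 0b11000
 */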

#ifdef __KERNEL__

static inline int sched_find_first_bit(const unsigned long *b)
{
	if (b[0])
		return __ffs(b[0]);
	if (b[1])
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
}
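
/*
 * Example: the caller is the O(1) scheduler, whose 140-bit priority bitmap
 * spans three 64-bit words and always has at least one bit set, so the
 * unconditional __ffs(b[2]) on the last line is safe:
 *
 *	word 0: priorities 0..63, word 1: 64..127, word 2: 128..139
 */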

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=r" (r) : "rm" (x), "r" (-1));
	return r+1;
}
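
/*
 * Example: unlike __ffs(), ffs() is 1-based and defined for zero input,
 * matching the libc convention:
 *
 *	ffs(0) == 0	// cmovzl loads -1, so r+1 == 0
 *	ffs(1) == 1
 *	ffs(8) == 4
 */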

/**
 * fls64 - find last bit set in 64 bit word
 * @x: the word to search
 *
 * This is defined the same way as fls.
 */
static __inline__ int fls64(__u64 x)
{
	if (x == 0)
		return 0;
	return __fls(x) + 1;
}

/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 */
static __inline__ int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=&r" (r) : "rm" (x), "rm" (-1));
	return r+1;
}
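
/*
 * Example: fls()/fls64() return the 1-based position of the most
 * significant set bit, or 0 for a zero input:
 *
 *	fls(0)            == 0
 *	fls(8)            == 4	// 0b1000, highest bit is bit 3
 *	fls64(1ULL << 40) == 41
 */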

/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
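
/*
 * Example: hweight32(0xF0F0F0F0) == 16 -- each 0xF0 byte contributes four
 * set bits.  The generic_hweight*() helpers compute this with the usual
 * parallel bit-summing; no dedicated popcount instruction is used here.
 */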

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#define ext2_set_bit(nr,addr) \
	__test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock,nr,addr) \
	test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
	test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_bit((unsigned long*)addr, size, off)
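
/*
 * Example: ext2 stores its on-disk bitmaps little-endian.  Since x86-64 is
 * itself little-endian, the native bit operations index those bitmaps
 * directly, so these wrappers are plain casts with no byte swapping,
 * unlike their counterparts on big-endian architectures.
 */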

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
#define minix_find_first_zero_bit(addr,size) \
	find_first_zero_bit((void*)addr,size)

#endif /* __KERNEL__ */

#endif /* _X86_64_BITOPS_H */