#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 * O(1) scheduler patch
 */

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations
 * to operate on hw-defined data-structures, so we can't easily change these
 * operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}
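
/*
 * Worked example (values are illustrative): for nr == 37, "nr >> 5"
 * selects 32-bit word 1 and "1 << (nr & 31)" is 1 << 5, so
 *
 *	unsigned long w = 0;
 *	set_bit(37, &w);
 *
 * leaves w == (1UL << 37) on a little-endian machine.  The cmpxchg_acq()
 * loop simply retries the OR until no other CPU has modified the word
 * between the load of "old" and the compare-and-exchange.
 */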

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/*
 * clear_bit() has "acquire" semantics.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}
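
/*
 * Barrier-usage sketch (LOCK_BIT and "word" are hypothetical names):
 * when clear_bit() is used to release a lock, stores that precede the
 * release must be ordered explicitly, since the operation itself only
 * has acquire semantics:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &word);
 */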

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
	volatile __u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	*p &= ~m;
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}
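
/*
 * Usage sketch (BUSY_BIT and "flags" are hypothetical names): a common
 * trylock pattern, where the caller owns the resource only if the bit
 * was previously clear:
 *
 *	if (!test_and_set_bit(BUSY_BIT, &flags)) {
 *		... got it; do the work, then release via clear_bit() ...
 *	}
 */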

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p &= ~m;
	return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/*
 * WARNING: non-atomic version.
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt(x & (~x - 1));
	return result;
}
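
/*
 * Worked example (values are illustrative): for x == 0xb7 (binary
 * ...10110111) the first zero is bit 3.  ~x ends in ...01001000,
 * ~x - 1 ends in ...01000111, so x & (~x - 1) == 0x7 and its
 * population count, 3, is exactly the index of that zero bit.
 */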

/**
 * __ffs - find the first set bit in a word
 * @x: The word to search
 *
 * Returns the bit-number (0..63) of the first (least significant) set bit.
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt((x-1) & ~x);
	return result;
}
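
/*
 * Worked example (values are illustrative): for x == 0xb8 (binary
 * 10111000) the lowest set bit is bit 3.  x - 1 == 0xb7 flips exactly
 * that bit and the bits below it, ~x keeps only the ones below, so
 * (x - 1) & ~x == 0x7 and ia64_popcnt() returns 3.
 */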

#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

	exp = ia64_getf_exp(d);
	return exp - 0xffff;
}
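
/*
 * Worked example (values are illustrative): converting x == 9 to long
 * double normalizes it as 1.001b * 2^3.  ia64_getf_exp() extracts the
 * biased exponent 0xffff + 3 from the register format, and subtracting
 * the 0xffff bias gives ia64_fls(9) == 3.
 */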

/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int
fls (int t)
{
	unsigned long x = t & 0xffffffffu;

	if (!x)
		return 0;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return ia64_popcnt(x);
}
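
/*
 * Worked example (values are illustrative): for t == 9 (binary 1001)
 * the shift cascade smears the most-significant set bit downwards,
 * turning x into binary 1111; ia64_popcnt() then counts 4 set bits,
 * so fls(9) == 4.
 */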

#include <asm-generic/bitops/fls64.h>

/*
 * ffs: find first bit set.  This is defined the same way as the libc and
 * compiler builtin ffs routines, therefore differs in spirit from the above
 * ffz (man ffs): it operates on "int" values only and the result value is
 * the bit number + 1.  ffs(0) is defined to return zero.
 */
#define ffs(x)	__builtin_ffs(x)
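
/*
 * For example, ffs(0) == 0, ffs(1) == 1, and ffs(0x8000) == 16,
 * whereas __ffs(0x8000) above returns 15.
 */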

/*
 * hweightN: returns the Hamming weight (i.e., the number
 * of bits set) of an N-bit word.
 */
static __inline__ unsigned long
hweight64 (unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);
	return result;
}

#define hweight32(x)	(unsigned int) hweight64((x) & 0xfffffffful)
#define hweight16(x)	(unsigned int) hweight64((x) & 0xfffful)
#define hweight8(x)	(unsigned int) hweight64((x) & 0xfful)
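
/*
 * For example, hweight64(0xffUL) == 8, hweight16(0x0101) == 2 and
 * hweight8(0x80) == 1; the narrower variants simply mask the argument
 * down to N bits before counting.
 */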

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
#define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)

#include <asm-generic/bitops/minix.h>
#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */