#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
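/*
 * Illustrative example (added for clarity, not part of the original
 * header): on 64 bit, set_bit(0, map) sets the least significant bit
 * of the first quadword, which in big endian memory is the 0x01 bit
 * of the *eighth* byte:
 *
 *	unsigned long map[2] = { 0, 0 };
 *	set_bit(0, map);	-> map[0] == 0x0000000000000001UL
 *	set_bit(64, map);	-> map[1] == 0x0000000000000001UL
 *
 * i.e. ((unsigned char *) map)[7] == 0x01 and all other bytes of
 * map[0] stay zero.
 */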

/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef __s390x__

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		__op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (__old), "=&d" (__new),				\
		  "=Q" (*(unsigned long *) __addr)			\
		: "d" (__val), "Q" (*(unsigned long *) __addr)		\
		: "cc");

#else /* __s390x__ */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		__op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (__old), "=&d" (__new),				\
		  "=Q" (*(unsigned long *) __addr)			\
		: "d" (__val), "Q" (*(unsigned long *) __addr)		\
		: "cc");

#endif /* __s390x__ */
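/*
 * Sketch of what __BITOPS_LOOP does (comment added for clarity): it is
 * the classic compare-and-swap retry loop.  In C-like pseudo code:
 *
 *	old = *addr;
 *	do {
 *		new = old <op> val;
 *	} while (!compare_and_swap(addr, &old, new));
 *
 * CS/CSG reloads the current memory contents into old on failure, so
 * the loop converges as soon as no other CPU modifies the word between
 * the load and the compare and swap.
 */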

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() asm volatile("" : : : "memory")

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
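/*
 * Note (added): __BITOPS_BARRIER() is only a compiler barrier; the
 * ordering guarantee itself comes from CS/CSG, which serializes on
 * s390.  Together they give the test_and_*_bit routines the full
 * memory barrier semantics the generic bitops API requires.
 */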

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr |= 1 << (nr & 7);
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )
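/*
 * Added note: the __builtin_constant_p() dispatch lets the compiler
 * resolve set_bit_simple(CONST, addr) at build time to a plain C byte
 * OR, while a variable bit number falls back to the OC based
 * __set_bit().  For example, on 64 bit set_bit_simple(3, map)
 * compiles to the equivalent of
 *
 *	((unsigned char *) map)[7] |= 0x08;
 */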

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr &= ~(1 << (nr & 7));
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr ^= 1 << (nr & 7);
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit			set_bit_cs
#define clear_bit		clear_bit_cs
#define change_bit		change_bit_cs
#define test_and_set_bit	test_and_set_bit_cs
#define test_and_clear_bit	test_and_clear_bit_cs
#define test_and_change_bit	test_and_change_bit_cs
#else
#define set_bit			set_bit_simple
#define clear_bit		clear_bit_simple
#define change_bit		change_bit_simple
#define test_and_set_bit	test_and_set_bit_simple
#define test_and_clear_bit	test_and_clear_bit_simple
#define test_and_change_bit	test_and_change_bit_simple
#endif


/*
 * This routine doesn't need to be atomic.
 */

static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
	return (((volatile char *) addr)
		[(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )
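/*
 * Illustrative usage (added; the s390 specifics are invisible to
 * callers, which just use the generic bitops API):
 *
 *	DECLARE_BITMAP(flags, 64);
 *
 *	set_bit(12, flags);			-> atomic on SMP
 *	if (test_and_clear_bit(12, flags))	-> old value was 1
 *		...;
 *	__set_bit(13, flags);			-> non-atomic variant
 */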

/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef __s390x__
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&d" (size)
		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}
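/*
 * Rough C equivalent of the asm loop above (comment added for
 * clarity; the asm is what actually gets compiled):
 *
 *	unsigned long i;
 *
 *	for (i = 0; i < (size - 1) / __BITOPS_WORDSIZE; i++)
 *		if (addr[i] != -1UL)
 *			break;
 *	bytes = i * (__BITOPS_WORDSIZE / 8);
 *
 * It skips fully set longs and deliberately never skips past the last
 * long, so __ffz_word() always has a word left to search.
 */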

/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef __s390x__
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&a" (size)
		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}

/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef __s390x__
	if ((word & 0xffffffff) == 0xffffffff) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	return nr + _zb_findmap[(unsigned char) word];
}
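/*
 * Worked example (added): __ffz_word(0, 0x00000000000000ffUL) on
 * 64 bit skips the all-ones low byte (word >>= 8, nr = 8) and then
 * looks up _zb_findmap[0x00] == 0, returning 8 as the number of the
 * first zero bit.  The shift cascade plus the 256-entry lookup table
 * replaces a per-bit search loop.
 */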

/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef __s390x__
	if ((word & 0xffffffff) == 0) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0) {
		word >>= 8;
		nr += 8;
	}
	return nr + _sb_findmap[(unsigned char) word];
}
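/*
 * Worked example (added): __ffs_word(0, 0x0100UL) skips the zero low
 * byte (word >>= 8, nr = 8) and then looks up _sb_findmap[0x01] == 0,
 * returning 8: bit 8 is the first set bit of 0x0100.
 */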


/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
					    unsigned long offset)
{
	p = (unsigned long *)((unsigned long) p + offset);
	return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
					    unsigned long offset)
{
	unsigned long word;

	p = (unsigned long *)((unsigned long) p + offset);
#ifndef __s390x__
	asm volatile(
		"	ic	%0,%O1(%R1)\n"
		"	icm	%0,2,%O1+1(%R1)\n"
		"	icm	%0,4,%O1+2(%R1)\n"
		"	icm	%0,8,%O1+3(%R1)"
		: "=&d" (word) : "Q" (*p) : "cc");
#else
	asm volatile(
		"	lrvg	%0,%1"
		: "=d" (word) : "m" (*p) );
#endif
	return word;
}
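/*
 * Note (added): s390 is big endian, so the little endian load has to
 * byte-reverse.  On 64 bit a single LOAD REVERSED (lrvg) does this; on
 * 31 bit the bytes are inserted one at a time with ic/icm.  E.g. the
 * memory bytes 01 00 00 00 00 00 00 00 are returned as 1UL, matching
 * what a little endian CPU would read from the same location.
 */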

/*
 * The various find bit functions.
 */

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffz_word(0, word);
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs_word(1, x);
}
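/*
 * Added example of the differing conventions: for x == 0x10,
 * __ffs(0x10) == 4 (zero based), while ffs(0x10) == 5 (one based,
 * like the libc routine), and ffs(0) == 0 whereas __ffs(0) and
 * ffz(~0UL) are undefined.
 */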

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(addr, size);
	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit find_first_zero_bit
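/*
 * Sketch of the two-stage search (added): __ffz_word_loop() skips
 * whole longs that are all ones and returns a byte offset;
 * __ffz_word() then locates the zero bit inside the first candidate
 * long.  If the region contains no zero bit, the result is clamped to
 * @size, the conventional "not found" return value.
 */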

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_bit(const unsigned long *addr,
					   unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit find_first_bit

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit(const unsigned long *addr,
				     unsigned long size,
				     unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffz_word returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, *p >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}
#define find_next_zero_bit find_next_zero_bit

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_bit(const unsigned long *addr,
				unsigned long size,
				unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffs_word returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, *p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}
#define find_next_bit find_next_bit

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/*
 * ATTENTION: Intel byte ordering convention for ext2 and minix!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
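/*
 * Added example: with this little endian numbering, bit 0 lives in the
 * 0x01 bit of the *first* byte in memory, so after
 * test_and_set_bit_le(0, map) a byte-wise dump of the bitmap starts
 * with 01, whereas the native big endian set_bit(0, map) sets the last
 * byte of the first word instead.
 */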

static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(vaddr, size);
	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
					unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit_le(p, size);
}
#define find_next_zero_bit_le find_next_zero_bit_le

static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(vaddr, size);
	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit_le find_first_bit_le

static inline int find_next_bit_le(void *vaddr, unsigned long size,
				   unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffs returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit_le(p, size);
}
#define find_next_bit_le find_next_bit_le

#include <asm-generic/bitops/le.h>

#define ext2_set_bit_atomic(lock, nr, addr)	\
	test_and_set_bit_le(nr, addr)
#define ext2_clear_bit_atomic(lock, nr, addr)	\
	test_and_clear_bit_le(nr, addr)


#endif /* __KERNEL__ */

#endif /* _S390_BITOPS_H */