Blackfin architecture: include/asm-blackfin/bitops.h
#ifndef _BLACKFIN_BITOPS_H
#define _BLACKFIN_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* local_irq_save/local_irq_restore */

#ifdef __KERNEL__

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;			/* select the 32-bit word */
	mask = 1 << (nr & 0x1f);	/* bit position within that word */
	local_irq_save(flags);		/* atomic on UP: mask interrupts */
	*a |= mask;
	local_irq_restore(flags);
}

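/*
 * Illustrative sketch (not part of the original header): how the
 * nr >> 5 / nr & 0x1f split addresses a bit in a multi-word bitmap.
 * The two-word bitmap and the bit number 37 below are hypothetical:
 *
 *	unsigned long map[2] = { 0, 0 };
 *	set_bit(37, map);
 *
 * 37 >> 5 = 1 selects map[1]; 1 << (37 & 0x1f) = 1 << 5 is the mask,
 * so bit 5 of map[1] is set.
 */
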
static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;		/* non-atomic variant: caller must serialize */
}

/*
 * clear_bit() doesn't provide any barrier for the compiler;
 * use the macros below where ordering matters.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

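/*
 * Sketch of the intended use (the flag word "wordp" and bit LOCK_BIT
 * are hypothetical): a bit released with clear_bit() is ordered
 * against surrounding accesses by the macros above, which on this
 * port expand to plain compiler barriers:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, wordp);
 *	smp_mb__after_clear_bit();
 */
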
static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long flags;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	local_irq_save(flags);
	*ADDR ^= mask;
	local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;	/* sample the old bit... */
	*a |= mask;			/* ...then set it, atomically */
	local_irq_restore(flags);

	return retval;
}

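/*
 * Sketch of the classic pattern (the bitmap "flags" and bit BUSY are
 * hypothetical): test_and_set_bit() atomically claims a flag, since
 * the old value is sampled and the bit set with interrupts disabled:
 *
 *	if (!test_and_set_bit(BUSY, &flags)) {
 *		... we own the resource ...
 *		clear_bit(BUSY, &flags);
 *	}
 */
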
static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;
	return retval;
}

/*
 * This routine doesn't need to be atomic; it only reads.
 */
static __inline__ int __constant_test_bit(int nr, const void *addr)
{
	return ((1UL << (nr & 31)) &
		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const void *addr)
{
	const int *a = (const int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_bit((nr),(addr)) : \
	 __test_bit((nr),(addr)))

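/*
 * test_bit() picks an implementation at compile time: when nr is a
 * compile-time constant, gcc can fold __constant_test_bit() down to a
 * single load and mask.  Hypothetical calls for illustration:
 *
 *	test_bit(3, map);	constant nr: __constant_test_bit()
 *	test_bit(n, map);	variable nr: __test_bit()
 */
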
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>

#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/ext2-non-atomic.h>

#include <asm-generic/bitops/minix.h>

#endif				/* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif				/* _BLACKFIN_BITOPS_H */