#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H

/*
 * Word-at-a-time interfaces for PowerPC.
 */

#include <linux/kernel.h>
#include <asm/asm-compat.h>

#ifdef __BIG_ENDIAN__

struct word_at_a_time {
	const unsigned long high_bits, low_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }

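/*
 * On 64-bit BE, high_bits works out to REPEAT_BYTE(0xfe) + 1 =
 * 0xfefefefefefefeff, which equals -REPEAT_BYTE(0x01) modulo 2^64, and
 * low_bits to 0x7f7f7f7f7f7f7f7f, so has_zero() below reduces to the
 * classic (val - 0x0101..01) & ~val & 0x8080..80 zero-byte test.
 */
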
/* Bit set in the bytes that have a zero */
static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
{
	unsigned long mask = (val & c->low_bits) + c->low_bits;
	return ~(mask | rhs);
}

#define create_zero_mask(mask) (mask)

static inline long find_zero(unsigned long mask)
{
	long leading_zero_bits;

	asm (PPC_CNTLZL "%0,%1" : "=r" (leading_zero_bits) : "r" (mask));
	return leading_zero_bits >> 3;
}

static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
{
	unsigned long rhs = val | c->low_bits;
	*data = rhs;
	return (val + c->high_bits) & ~rhs;
}

static inline unsigned long zero_bytemask(unsigned long mask)
{
	return ~1ul << __fls(mask);
}

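/*
 * Worked example of the big-endian pipeline (32-bit word for brevity):
 * val = 0x41004243 has its zero in byte 1.  With low_bits = 0x7f7f7f7f
 * and high_bits = 0xfefefeff:
 *
 *   has_zero():       rhs = val | low_bits               = 0x7f7f7f7f
 *                     (val + high_bits) & ~rhs           = 0x00800000 (nonzero)
 *   prep_zero_mask(): mask = (val & low_bits) + low_bits = 0xc07fc1c2
 *                     ~(mask | rhs)                      = 0x00800000
 *   find_zero():      cntlzw(0x00800000) = 8, 8 >> 3     = byte index 1
 *   zero_bytemask():  ~1ul << __fls(0x00800000)          = 0xff000000
 *                     (covers the bytes before the zero byte)
 */
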
#else

#ifdef CONFIG_64BIT

/* unused */
struct word_at_a_time {
};

#define WORD_AT_A_TIME_CONSTANTS { }

/* This will give us 0xff for a NUL byte and 0x00 elsewhere */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
	unsigned long ret;
	unsigned long zero = 0;

	asm("cmpb %0,%1,%2" : "=r" (ret) : "r" (a), "r" (zero));
	*bits = ret;

	return ret;
}

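/*
 * cmpb compares the two source registers byte by byte, writing 0xff into
 * each result byte that matches; e.g. comparing a = 0x1200340056007800
 * against zero yields 0x00ff00ff00ff00ff.
 */
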
static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
{
	return bits;
}

/* Alan Modra's little-endian strlen tail for 64-bit */
static inline unsigned long create_zero_mask(unsigned long bits)
{
	unsigned long leading_zero_bits;
	long trailing_zero_bit_mask;

	asm("addi %1,%2,-1\n\t"		/* bits - 1 */
	    "andc %1,%1,%2\n\t"		/* (bits - 1) & ~bits: ones below the lowest set bit */
	    "popcntd %0,%1"		/* count them: bit index of the first 0xff byte */
	    : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
	    : "b" (bits));

	return leading_zero_bits;
}

static inline unsigned long find_zero(unsigned long mask)
{
	return mask >> 3;
}

/* This assumes that we never ask for an all 1s bitmask */
static inline unsigned long zero_bytemask(unsigned long mask)
{
	return (1UL << mask) - 1;
}

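/*
 * Worked example: a = 0x4847464544004241 (the string "AB\0DEFGH" read
 * little-endian) makes cmpb produce bits = 0x0000000000ff0000.  Then:
 *
 *   create_zero_mask(): (bits - 1) & ~bits = 0x000000000000ffff,
 *                       popcntd counts 16 set bits
 *   find_zero():        16 >> 3 = byte index 2
 *   zero_bytemask():    (1UL << 16) - 1 = 0xffff, covering the two
 *                       bytes before the NUL
 */
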
#else /* 32-bit case */

struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }

/*
 * This is largely generic for little-endian machines, but the
 * optimal byte mask counting is probably going to be something
 * that is architecture-specific. If you have a reliably fast
 * bit count instruction, that might be better than the multiply
 * and shift, for example.
 */

/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
static inline long count_masked_bytes(long mask)
{
	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
	long a = (0x0ff0001+mask) >> 23;
	/* Fix the 1 for 00 case */
	return a & mask;
}

static inline unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}

static inline unsigned long find_zero(unsigned long mask)
{
	return count_masked_bytes(mask);
}

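/*
 * Worked example: a = 0x44004241 ("AB\0D" read little-endian) makes
 * has_zero() below produce bits = 0x00800000.  Then:
 *
 *   create_zero_mask(): (bits - 1) & ~bits = 0x007fffff, >> 7 = 0xffff
 *   find_zero():        (0x0ff0001 + 0xffff) >> 23 = 2, and 2 & 0xffff
 *                       = byte index 2
 *   zero_bytemask():    the mask 0xffff itself covers the two bytes
 *                       before the NUL
 */
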
/* Return nonzero if it has a zero */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
	*bits = mask;
	return mask;
}

static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
{
	return bits;
}

/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)

#endif /* CONFIG_64BIT */

#endif /* __BIG_ENDIAN__ */

/*
 * We use load_unaligned_zeropad() in a selftest, which builds a userspace
 * program. Some linker scripts seem to discard the .fixup section, so allow
 * the test code to use a different section name.
 */
#ifndef FIXUP_SECTION
#define FIXUP_SECTION ".fixup"
#endif

static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long ret, offset, tmp;

	asm(
	"1: " PPC_LL "%[ret], 0(%[addr])\n"
	"2:\n"
	".section " FIXUP_SECTION ",\"ax\"\n"
	"3: "
#ifdef __powerpc64__
	"clrrdi %[tmp], %[addr], 3\n\t"		/* tmp = addr & ~7 (aligned base) */
	"clrlsldi %[offset], %[addr], 61, 3\n\t"	/* offset = (addr & 7) * 8 bits */
	"ld %[ret], 0(%[tmp])\n\t"
#ifdef __BIG_ENDIAN__
	"sld %[ret], %[ret], %[offset]\n\t"	/* BE: wanted bytes to the top, zero-fill below */
#else
	"srd %[ret], %[ret], %[offset]\n\t"	/* LE: wanted bytes to the bottom, zero-fill above */
#endif
#else
	"clrrwi %[tmp], %[addr], 2\n\t"		/* tmp = addr & ~3 (aligned base) */
	"clrlslwi %[offset], %[addr], 30, 3\n\t"	/* offset = (addr & 3) * 8 bits */
	"lwz %[ret], 0(%[tmp])\n\t"
#ifdef __BIG_ENDIAN__
	"slw %[ret], %[ret], %[offset]\n\t"
#else
	"srw %[ret], %[ret], %[offset]\n\t"
#endif
#endif
	"b 2b\n"
	".previous\n"
	".section __ex_table,\"a\"\n\t"
	PPC_LONG_ALIGN "\n\t"
	PPC_LONG "1b,3b\n"
	".previous"
	: [tmp] "=&b" (tmp), [offset] "=&r" (offset), [ret] "=&r" (ret)
	: [addr] "b" (addr), "m" (*(unsigned long *)addr));

	return ret;
}

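/*
 * Illustrative sketch (hypothetical helper, not part of this header's
 * API): how the primitives above compose into a strlen()-style loop,
 * following the pattern used by strscpy() in lib/string.c.  The string
 * is assumed to start on an unsigned long boundary.
 *
 *	static inline size_t wordwise_strlen(const char *s)
 *	{
 *		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 *		const unsigned long *p = (const unsigned long *)s;
 *		unsigned long val, data;
 *		size_t len = 0;
 *
 *		for (;; len += sizeof(unsigned long)) {
 *			val = *p++;
 *			if (has_zero(val, &data, &constants))
 *				break;
 *		}
 *		data = prep_zero_mask(val, data, &constants);
 *		data = create_zero_mask(data);
 *		return len + find_zero(data);
 *	}
 */
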
#undef FIXUP_SECTION

#endif /* _ASM_WORD_AT_A_TIME_H */