/*
 * Copyright (C) 2012 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/raid/pq.h>
#include "x86.h"

static int raid6_has_ssse3(void)
{
	return boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2) &&
		boot_cpu_has(X86_FEATURE_SSSE3);
}

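/*
 * Recover two failed data pages, D_a and D_b (a = faila < b = failb).
 * With P'/Q' the syndromes regenerated below with zeroes in the two
 * failed slots, the standard RAID-6 identities give
 *
 *	px  = P ^ P' = D_a ^ D_b
 *	qx  = (Q ^ Q') / (g^a + g^b)
 *	D_b = px / (g^(b-a) + 1) ^ qx
 *	D_a = D_b ^ px
 *
 * so the SIMD loops below are a vectorized form of the scalar loop in
 * the generic lib/raid6/recov.c:
 *
 *	px    = *p ^ *dp;
 *	qx    = qmul[*q ^ *dq];
 *	*dq++ = db = pbmul[px] ^ qx;	(reconstructed D_b)
 *	*dp++ = db ^ px;		(reconstructed D_a)
 */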
static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
				    int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	static const u8 __aligned(16) x0f[16] = {
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	   Use the dead data pages as temporary storage for
	   delta p and delta q */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila] = dp;
	ptrs[failb] = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
		raid6_gfexp[failb]]];
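
	/*
	 * Each raid6_vgfmul[c] is a pair of 16-byte pshufb tables for
	 * multiplying by the GF(2^8) constant c: bytes 0-15 map a low
	 * nibble, bytes 16-31 a high nibble.  Here pbmul multiplies by
	 * 1/(g^(b-a) + 1) and qmul by 1/(g^a + g^b).
	 */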

	kernel_fpu_begin();

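	/*
	 * xmm7 = 0x0f nibble mask.  Each GF(2^8) multiply below is two
	 * pshufb table lookups: mask out the low nibble of every byte
	 * and look it up, shift the high nibble down and look it up,
	 * then XOR the two partial products.
	 */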
	asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));

#ifdef CONFIG_X86_64
	asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
	asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
	asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
#endif
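
	/*
	 * x86_64 has xmm8-xmm15, so three of the four lookup tables can
	 * stay resident and the loop is unrolled to 32 bytes per
	 * iteration; the 32-bit path reloads the tables inside the loop
	 * and handles 16 bytes at a time (hence ssse3x2 vs ssse3x1
	 * below).  Either way bytes must be a multiple of the step,
	 * which holds for the page-sized regions this is called with.
	 */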

	/* Now do it... */
	while (bytes) {
#ifdef CONFIG_X86_64
		/* xmm6 = qmul[0], xmm14 = pbmul[0], xmm15 = pbmul[16] */

		asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
		asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
		asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
		asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
		asm volatile("pxor %0,%%xmm1" : : "m" (dq[0]));
		asm volatile("pxor %0,%%xmm9" : : "m" (dq[16]));
		asm volatile("pxor %0,%%xmm0" : : "m" (dp[0]));
		asm volatile("pxor %0,%%xmm8" : : "m" (dp[16]));

		/* xmm0/8 = px */

		asm volatile("movdqa %xmm6,%xmm4");
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
		asm volatile("movdqa %xmm6,%xmm12");
		asm volatile("movdqa %xmm5,%xmm13");
		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("movdqa %xmm9,%xmm11");
		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
		asm volatile("movdqa %xmm8,%xmm10");
		asm volatile("psraw $4,%xmm1");
		asm volatile("psraw $4,%xmm9");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm11");
		asm volatile("pand %xmm7,%xmm1");
		asm volatile("pand %xmm7,%xmm9");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pshufb %xmm9,%xmm13");
		asm volatile("pxor %xmm4,%xmm5");
		asm volatile("pxor %xmm12,%xmm13");

		/* xmm5/13 = qx */

		asm volatile("movdqa %xmm14,%xmm4");
		asm volatile("movdqa %xmm15,%xmm1");
		asm volatile("movdqa %xmm14,%xmm12");
		asm volatile("movdqa %xmm15,%xmm9");
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("movdqa %xmm10,%xmm11");
		asm volatile("psraw $4,%xmm2");
		asm volatile("psraw $4,%xmm10");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm11");
		asm volatile("pand %xmm7,%xmm2");
		asm volatile("pand %xmm7,%xmm10");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pshufb %xmm10,%xmm9");
		asm volatile("pxor %xmm4,%xmm1");
		asm volatile("pxor %xmm12,%xmm9");

		/* xmm1/9 = pbmul[px] */
		asm volatile("pxor %xmm5,%xmm1");
		asm volatile("pxor %xmm13,%xmm9");
		/* xmm1/9 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));

		asm volatile("pxor %xmm1,%xmm0");
		asm volatile("pxor %xmm9,%xmm8");
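		/* xmm0/8 = da = db ^ px = DP */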
		asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
		asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dp += 32;
		dq += 32;
#else
		asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
		asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
		asm volatile("pxor %0,%%xmm1" : : "m" (*dq));
		asm volatile("pxor %0,%%xmm0" : : "m" (*dp));

		/* xmm1 = dq ^ q
		 * xmm0 = dp ^ p
		 */
		asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));

		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("psraw $4,%xmm1");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm1");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pxor %xmm4,%xmm5");

		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */

		/* xmm5 = qx */

		asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
		asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("psraw $4,%xmm2");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm2");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pxor %xmm4,%xmm1");

		/* xmm1 = pbmul[px] */
		asm volatile("pxor %xmm5,%xmm1");
		/* xmm1 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));

		asm volatile("pxor %xmm1,%xmm0");
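		/* xmm0 = da = db ^ px = DP */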
		asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));

		bytes -= 16;
		p += 16;
		q += 16;
		dp += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}

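/*
 * Recover one data page D_a plus the P page.  Regenerating the
 * syndromes with a zeroed D_a slot leaves P-computed-without-D_a in
 * the p page and Q' in the dq scratch page, so
 *
 *	D_a = (Q ^ Q') * g^-a	(the qmul lookup in the loop below)
 *	P   = P' ^ D_a
 */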
static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
				    void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	static const u8 __aligned(16) x0f[16] = {
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data page
	   Use the dead data page as temporary storage for delta q */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila] = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];

	kernel_fpu_begin();

	asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));

	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));

		/* xmm3 = q[0] ^ dq[0] */

		asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm4 = q[16] ^ dq[16] */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %xmm4, %xmm8");

		/* xmm4 = xmm8 = q[16] ^ dq[16] */

		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0]));
		asm volatile("pxor %xmm0, %xmm1");
		asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16]));

		/* xmm1 = qmul[q[0] ^ dq[0]] */

		asm volatile("psraw $4, %xmm4");
		asm volatile("pand %xmm7, %xmm8");
		asm volatile("pand %xmm7, %xmm4");
		asm volatile("pshufb %xmm8, %xmm10");
		asm volatile("pshufb %xmm4, %xmm11");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("pxor %xmm10, %xmm11");
		asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));

		/* xmm11 = qmul[q[16] ^ dq[16]] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */

		asm volatile("pxor %xmm11, %xmm12");

		/* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16]));

		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
		asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dq += 32;

#else
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm3 = *q ^ *dq */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("pxor %xmm0, %xmm1");

		/* xmm1 = qmul[*q ^ *dq] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = *p ^ qmul[*q ^ *dq] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));

		bytes -= 16;
		p += 16;
		q += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}

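/*
 * The selection code in lib/raid6/algos.c picks the valid recovery
 * implementation with the highest priority; the generic C routines
 * register with priority 0, so this one wins whenever SSSE3 is
 * available.
 */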
const struct raid6_recov_calls raid6_recov_ssse3 = {
	.data2 = raid6_2data_recov_ssse3,
	.datap = raid6_datap_recov_ssse3,
	.valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
	.name = "ssse3x2",
#else
	.name = "ssse3x1",
#endif
	.priority = 1,
};