#ifndef _ASM_X86_XOR_64_H
#define _ASM_X86_XOR_64_H

/*
 * Plain (non-prefetching) SSE XOR template.
 *
 * The xor_sse_2..xor_sse_5 routines are not defined in this header —
 * presumably the SSE XOR kernels for 2..5 source buffers declared in
 * the shared x86 xor header; confirm against <asm/xor.h>.
 */
static struct xor_block_template xor_block_sse = {
	.name = "generic_sse",
	.do_2 = xor_sse_2,
	.do_3 = xor_sse_3,
	.do_4 = xor_sse_4,
	.do_5 = xor_sse_5,
};
/* Also try the AVX routines */
#include <asm/xor_avx.h>

/* We force the use of the SSE xor block because it can write around L2.
   We may also be able to load into the L1 only depending on how the cpu
   deals with a load to a line that is being prefetched. */
/*
 * Candidate set benchmarked at boot by the RAID xor speed test:
 * the AVX routines (via AVX_XOR_SPEED — NOTE(review): assumed to expand
 * to the AVX xor_speed calls, or to nothing when AVX is unavailable;
 * confirm against <asm/xor_avx.h>), the prefetching SSE variant
 * (xor_block_sse_pf64, declared outside this header), and the plain
 * SSE variant defined above.
 */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES			\
do {						\
	AVX_XOR_SPEED;				\
	xor_speed(&xor_block_sse_pf64);		\
	xor_speed(&xor_block_sse);		\
} while (0)

#endif /* _ASM_X86_XOR_64_H */