crypto: x86/sha1 - re-enable the AVX variant
arch/x86/crypto/sha1_ssse3_glue.c
/*
 * Cryptographic API.
 *
 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
 * Supplemental SSE3 instructions.
 *
 * This file is based on sha1_generic.c
 *
 * Copyright (c) Alan Smithee.
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) Mathias Krause <minipli@googlemail.com>
 * Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>

asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
                                     unsigned int rounds);
#ifdef CONFIG_AS_AVX
asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
                                   unsigned int rounds);
#endif
#ifdef CONFIG_AS_AVX2
#define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */

asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
                                    unsigned int rounds);
#endif

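/*
 * Selected once in sha1_ssse3_mod_init() and then used for every block
 * transform: points at the fastest implementation the CPU supports.
 */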
static asmlinkage void (*sha1_transform_asm)(u32 *, const char *, unsigned int);

static int sha1_ssse3_init(struct shash_desc *desc)
{
        struct sha1_state *sctx = shash_desc_ctx(desc);

        *sctx = (struct sha1_state){
                .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
        };

        return 0;
}

static int __sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
                               unsigned int len, unsigned int partial)
{
        struct sha1_state *sctx = shash_desc_ctx(desc);
        unsigned int done = 0;

        sctx->count += len;

        /* Complete a previously buffered partial block first. */
        if (partial) {
                done = SHA1_BLOCK_SIZE - partial;
                memcpy(sctx->buffer + partial, data, done);
                sha1_transform_asm(sctx->state, sctx->buffer, 1);
        }

        /* Process all remaining full blocks directly from the input. */
        if (len - done >= SHA1_BLOCK_SIZE) {
                const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;

                sha1_transform_asm(sctx->state, data + done, rounds);
                done += rounds * SHA1_BLOCK_SIZE;
        }

        /* Buffer the remaining tail for the next update or final. */
        memcpy(sctx->buffer, data + done, len - done);

        return 0;
}

static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
                             unsigned int len)
{
        struct sha1_state *sctx = shash_desc_ctx(desc);
        unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
        int res;

        /* Handle the fast case right here */
        if (partial + len < SHA1_BLOCK_SIZE) {
                sctx->count += len;
                memcpy(sctx->buffer + partial, data, len);

                return 0;
        }

        if (!irq_fpu_usable()) {
                /* FPU not usable in this context; fall back to generic code. */
                res = crypto_sha1_update(desc, data, len);
        } else {
                kernel_fpu_begin();
                res = __sha1_ssse3_update(desc, data, len, partial);
                kernel_fpu_end();
        }

        return res;
}

/* Add padding and return the message digest. */
static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
{
        struct sha1_state *sctx = shash_desc_ctx(desc);
        unsigned int i, index, padlen;
        __be32 *dst = (__be32 *)out;
        __be64 bits;
        static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

        bits = cpu_to_be64(sctx->count << 3);

        /* Pad out to 56 mod 64 and append length */
        index = sctx->count % SHA1_BLOCK_SIZE;
        padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
        if (!irq_fpu_usable()) {
                crypto_sha1_update(desc, padding, padlen);
                crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
        } else {
                kernel_fpu_begin();
                /* We need to fill a whole block for __sha1_ssse3_update() */
                if (padlen <= 56) {
                        sctx->count += padlen;
                        memcpy(sctx->buffer + index, padding, padlen);
                } else {
                        __sha1_ssse3_update(desc, padding, padlen, index);
                }
                __sha1_ssse3_update(desc, (const u8 *)&bits, sizeof(bits), 56);
                kernel_fpu_end();
        }

        /* Store state in digest */
        for (i = 0; i < 5; i++)
                dst[i] = cpu_to_be32(sctx->state[i]);

        /* Wipe context */
        memset(sctx, 0, sizeof(*sctx));

        return 0;
}

static int sha1_ssse3_export(struct shash_desc *desc, void *out)
{
        struct sha1_state *sctx = shash_desc_ctx(desc);

        memcpy(out, sctx, sizeof(*sctx));

        return 0;
}

static int sha1_ssse3_import(struct shash_desc *desc, const void *in)
{
        struct sha1_state *sctx = shash_desc_ctx(desc);

        memcpy(sctx, in, sizeof(*sctx));

        return 0;
}

#ifdef CONFIG_AS_AVX2
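/*
 * The AVX2 routine only pays off on larger inputs; for fewer than
 * SHA1_AVX2_BLOCK_OPTSIZE blocks, dispatch to the plain AVX routine.
 */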
static void sha1_apply_transform_avx2(u32 *digest, const char *data,
                                      unsigned int rounds)
{
        /* Select the optimal transform based on data block size */
        if (rounds >= SHA1_AVX2_BLOCK_OPTSIZE)
                sha1_transform_avx2(digest, data, rounds);
        else
                sha1_transform_avx(digest, data, rounds);
}
#endif

static struct shash_alg alg = {
        .digestsize     = SHA1_DIGEST_SIZE,
        .init           = sha1_ssse3_init,
        .update         = sha1_ssse3_update,
        .final          = sha1_ssse3_final,
        .export         = sha1_ssse3_export,
        .import         = sha1_ssse3_import,
        .descsize       = sizeof(struct sha1_state),
        .statesize      = sizeof(struct sha1_state),
        .base           = {
                .cra_name        = "sha1",
                .cra_driver_name = "sha1-ssse3",
                .cra_priority    = 150,
                .cra_flags       = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize   = SHA1_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
        }
};

#ifdef CONFIG_AS_AVX
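/*
 * AVX is usable only when the CPU advertises it and the OS has enabled
 * saving of the SSE and YMM register state (OSXSAVE and the XCR0 bits).
 */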
static bool __init avx_usable(void)
{
        u64 xcr0;

        if (!cpu_has_avx || !cpu_has_osxsave)
                return false;

        xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
        if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
                pr_info("AVX detected but unusable.\n");

                return false;
        }

        return true;
}

#ifdef CONFIG_AS_AVX2
static bool __init avx2_usable(void)
{
        if (avx_usable() && cpu_has_avx2 && boot_cpu_has(X86_FEATURE_BMI1) &&
            boot_cpu_has(X86_FEATURE_BMI2))
                return true;

        return false;
}
#endif
#endif

static int __init sha1_ssse3_mod_init(void)
{
        char *algo_name;

        /* test for SSSE3 first */
        if (cpu_has_ssse3) {
                sha1_transform_asm = sha1_transform_ssse3;
                algo_name = "SSSE3";
        }

#ifdef CONFIG_AS_AVX
        /* allow AVX to override SSSE3, it's a little faster */
        if (avx_usable()) {
                sha1_transform_asm = sha1_transform_avx;
                algo_name = "AVX";
#ifdef CONFIG_AS_AVX2
                /* allow AVX2 to override AVX, it's a little faster */
                if (avx2_usable()) {
                        sha1_transform_asm = sha1_apply_transform_avx2;
                        algo_name = "AVX2";
                }
#endif
        }
#endif

        if (sha1_transform_asm) {
                pr_info("Using %s optimized SHA-1 implementation\n", algo_name);
                return crypto_register_shash(&alg);
        }
        pr_info("Neither AVX nor AVX2 nor SSSE3 is available/usable.\n");

        return -ENODEV;
}

static void __exit sha1_ssse3_mod_fini(void)
{
        crypto_unregister_shash(&alg);
}

module_init(sha1_ssse3_mod_init);
module_exit(sha1_ssse3_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");

MODULE_ALIAS("sha1");