Revert "crypto: aesni - disable "by8" AVX CTR optimization"
[deliverable/linux.git] / arch / x86 / crypto / aesni-intel_glue.c
1/*
2 * Support for Intel AES-NI instructions. This file contains glue
3 * code, the real AES implementation is in aesni-intel_asm.S.
4 *
5 * Copyright (C) 2008, Intel Corp.
6 * Author: Huang Ying <ying.huang@intel.com>
7 *
8 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9 * interface for 64-bit kernels.
10 * Authors: Adrian Hoban <adrian.hoban@intel.com>
11 * Gabriele Paoloni <gabriele.paoloni@intel.com>
12 * Tadeusz Struk (tadeusz.struk@intel.com)
13 * Aidan O'Mahony (aidan.o.mahony@intel.com)
14 * Copyright (c) 2010, Intel Corporation.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 */
21
22#include <linux/hardirq.h>
23#include <linux/types.h>
24#include <linux/crypto.h>
25#include <linux/module.h>
26#include <linux/err.h>
27#include <crypto/algapi.h>
28#include <crypto/aes.h>
29#include <crypto/cryptd.h>
30#include <crypto/ctr.h>
31#include <crypto/b128ops.h>
32#include <crypto/lrw.h>
33#include <crypto/xts.h>
34#include <asm/cpu_device_id.h>
35#include <asm/i387.h>
36#include <asm/crypto/aes.h>
37#include <crypto/ablk_helper.h>
38#include <crypto/scatterwalk.h>
39#include <crypto/internal/aead.h>
40#include <linux/workqueue.h>
41#include <linux/spinlock.h>
42#ifdef CONFIG_X86_64
43#include <asm/crypto/glue_helper.h>
44#endif
45
46#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
47#define HAS_PCBC
48#endif
49
50/* This data is stored at the end of the crypto_tfm struct.
51 * It's a type of per "session" data storage location.
52 * This needs to be 16 byte aligned.
53 */
54struct aesni_rfc4106_gcm_ctx {
55 u8 hash_subkey[16];
56 struct crypto_aes_ctx aes_key_expanded;
57 u8 nonce[4];
58 struct cryptd_aead *cryptd_tfm;
59};
60
61struct aesni_gcm_set_hash_subkey_result {
62 int err;
63 struct completion completion;
64};
65
66struct aesni_hash_subkey_req_data {
67 u8 iv[16];
68 struct aesni_gcm_set_hash_subkey_result result;
69 struct scatterlist sg;
70};
71
72#define AESNI_ALIGN (16)
73#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
74#define RFC4106_HASH_SUBKEY_SIZE 16
75
76struct aesni_lrw_ctx {
77 struct lrw_table_ctx lrw_table;
78 u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
79};
80
81struct aesni_xts_ctx {
82 u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
83 u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
84};
85
86asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
87 unsigned int key_len);
88asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
89 const u8 *in);
90asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
91 const u8 *in);
92asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
93 const u8 *in, unsigned int len);
94asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
95 const u8 *in, unsigned int len);
96asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
97 const u8 *in, unsigned int len, u8 *iv);
98asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
99 const u8 *in, unsigned int len, u8 *iv);
100
101int crypto_fpu_init(void);
102void crypto_fpu_exit(void);
103
104#define AVX_GEN2_OPTSIZE 640
105#define AVX_GEN4_OPTSIZE 4096
106
107#ifdef CONFIG_X86_64
108
109static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
110 const u8 *in, unsigned int len, u8 *iv);
111asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
112 const u8 *in, unsigned int len, u8 *iv);
113
114asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
115 const u8 *in, bool enc, u8 *iv);
116
117/* asmlinkage void aesni_gcm_enc()
118 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
119 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
120 * const u8 *in, Plaintext input
121 * unsigned long plaintext_len, Length of data in bytes for encryption.
122 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
123 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
124 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
125 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
126 * const u8 *aad, Additional Authentication Data (AAD)
127 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
128 * is going to be 8 or 12 bytes
129 * u8 *auth_tag, Authenticated Tag output.
130 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
131 * Valid values are 16 (most likely), 12 or 8.
132 */
133asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
134 const u8 *in, unsigned long plaintext_len, u8 *iv,
135 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
136 u8 *auth_tag, unsigned long auth_tag_len);
137
138/* asmlinkage void aesni_gcm_dec()
139 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
140 * u8 *out, Plaintext output. Decrypt in-place is allowed.
141 * const u8 *in, Ciphertext input
142 * unsigned long ciphertext_len, Length of data in bytes for decryption.
143 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
144 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
145 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
146 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
147 * const u8 *aad, Additional Authentication Data (AAD)
148 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
149 * to be 8 or 12 bytes
150 * u8 *auth_tag, Authenticated Tag output.
151 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
152 * Valid values are 16 (most likely), 12 or 8.
153 */
154asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
155 const u8 *in, unsigned long ciphertext_len, u8 *iv,
156 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
157 u8 *auth_tag, unsigned long auth_tag_len);
158
159
160#ifdef CONFIG_AS_AVX
161asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
162 void *keys, u8 *out, unsigned int num_bytes);
163asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
164 void *keys, u8 *out, unsigned int num_bytes);
165asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
166 void *keys, u8 *out, unsigned int num_bytes);
167/*
168 * asmlinkage void aesni_gcm_precomp_avx_gen2()
169 * gcm_data *my_ctx_data, context data
170 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
171 */
172asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
173
174asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
175 const u8 *in, unsigned long plaintext_len, u8 *iv,
176 const u8 *aad, unsigned long aad_len,
177 u8 *auth_tag, unsigned long auth_tag_len);
178
179asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
180 const u8 *in, unsigned long ciphertext_len, u8 *iv,
181 const u8 *aad, unsigned long aad_len,
182 u8 *auth_tag, unsigned long auth_tag_len);
183
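/*
 * Dispatch helpers: for requests shorter than AVX_GEN2_OPTSIZE the plain
 * SSE/AES-NI routine is expected to win, since the AVX path must first
 * precompute its hash subkey tables; larger requests use the AVX (gen2)
 * implementation.
 */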
184static void aesni_gcm_enc_avx(void *ctx, u8 *out,
185 const u8 *in, unsigned long plaintext_len, u8 *iv,
186 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
187 u8 *auth_tag, unsigned long auth_tag_len)
188{
189 if (plaintext_len < AVX_GEN2_OPTSIZE) {
190 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
191 aad_len, auth_tag, auth_tag_len);
192 } else {
193 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
194 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
195 aad_len, auth_tag, auth_tag_len);
196 }
197}
198
199static void aesni_gcm_dec_avx(void *ctx, u8 *out,
200 const u8 *in, unsigned long ciphertext_len, u8 *iv,
201 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
202 u8 *auth_tag, unsigned long auth_tag_len)
203{
204 if (ciphertext_len < AVX_GEN2_OPTSIZE) {
205 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
206 aad_len, auth_tag, auth_tag_len);
207 } else {
208 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
209 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
210 aad_len, auth_tag, auth_tag_len);
211 }
212}
213#endif
214
215#ifdef CONFIG_AS_AVX2
216/*
217 * asmlinkage void aesni_gcm_precomp_avx_gen4()
218 * gcm_data *my_ctx_data, context data
219 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
220 */
221asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
222
223asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
224 const u8 *in, unsigned long plaintext_len, u8 *iv,
225 const u8 *aad, unsigned long aad_len,
226 u8 *auth_tag, unsigned long auth_tag_len);
227
228asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
229 const u8 *in, unsigned long ciphertext_len, u8 *iv,
230 const u8 *aad, unsigned long aad_len,
231 u8 *auth_tag, unsigned long auth_tag_len);
232
233static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
234 const u8 *in, unsigned long plaintext_len, u8 *iv,
235 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
236 u8 *auth_tag, unsigned long auth_tag_len)
237{
238 if (plaintext_len < AVX_GEN2_OPTSIZE) {
239 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
240 aad_len, auth_tag, auth_tag_len);
241 } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
242 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
243 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
244 aad_len, auth_tag, auth_tag_len);
245 } else {
246 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
247 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
248 aad_len, auth_tag, auth_tag_len);
249 }
250}
251
252static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
253 const u8 *in, unsigned long ciphertext_len, u8 *iv,
254 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
255 u8 *auth_tag, unsigned long auth_tag_len)
256{
257 if (ciphertext_len < AVX_GEN2_OPTSIZE) {
258 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
259 aad, aad_len, auth_tag, auth_tag_len);
260 } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
261 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
262 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
263 aad_len, auth_tag, auth_tag_len);
264 } else {
265 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
266 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
267 aad_len, auth_tag, auth_tag_len);
268 }
269}
270#endif
271
272static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
273 const u8 *in, unsigned long plaintext_len, u8 *iv,
274 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
275 u8 *auth_tag, unsigned long auth_tag_len);
276
277static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
278 const u8 *in, unsigned long ciphertext_len, u8 *iv,
279 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
280 u8 *auth_tag, unsigned long auth_tag_len);
281
282static inline struct
283aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
284{
285 return
286 (struct aesni_rfc4106_gcm_ctx *)
287 PTR_ALIGN((u8 *)
288 crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
289}
290#endif
291
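/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for a tfm
 * context, so cra_ctxsize is padded by AESNI_ALIGN - 1 below and aes_ctx()
 * rounds the raw context pointer up to the 16-byte boundary the AES-NI
 * assembly expects.
 */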
292static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
293{
294 unsigned long addr = (unsigned long)raw_ctx;
295 unsigned long align = AESNI_ALIGN;
296
297 if (align <= crypto_tfm_ctx_alignment())
298 align = 1;
299 return (struct crypto_aes_ctx *)ALIGN(addr, align);
300}
301
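/*
 * The helpers below share one pattern: when the FPU is not usable in the
 * current context they fall back to the generic x86 C implementation,
 * otherwise they run the AES-NI routine inside kernel_fpu_begin()/end().
 */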
302static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
303 const u8 *in_key, unsigned int key_len)
304{
305 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
306 u32 *flags = &tfm->crt_flags;
307 int err;
308
309 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
310 key_len != AES_KEYSIZE_256) {
311 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
312 return -EINVAL;
313 }
314
315 if (!irq_fpu_usable())
316 err = crypto_aes_expand_key(ctx, in_key, key_len);
317 else {
318 kernel_fpu_begin();
319 err = aesni_set_key(ctx, in_key, key_len);
320 kernel_fpu_end();
321 }
322
323 return err;
324}
325
326static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
327 unsigned int key_len)
328{
329 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
330}
331
332static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
333{
334 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
335
336 if (!irq_fpu_usable())
337 crypto_aes_encrypt_x86(ctx, dst, src);
338 else {
339 kernel_fpu_begin();
340 aesni_enc(ctx, dst, src);
341 kernel_fpu_end();
342 }
343}
344
345static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
346{
347 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
348
349 if (!irq_fpu_usable())
350 crypto_aes_decrypt_x86(ctx, dst, src);
351 else {
352 kernel_fpu_begin();
353 aesni_dec(ctx, dst, src);
354 kernel_fpu_end();
355 }
356}
357
358static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
359{
360 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
361
362 aesni_enc(ctx, dst, src);
363}
364
365static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
366{
367 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
368
369 aesni_dec(ctx, dst, src);
370}
371
372static int ecb_encrypt(struct blkcipher_desc *desc,
373 struct scatterlist *dst, struct scatterlist *src,
374 unsigned int nbytes)
375{
376 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
377 struct blkcipher_walk walk;
378 int err;
379
380 blkcipher_walk_init(&walk, dst, src, nbytes);
381 err = blkcipher_walk_virt(desc, &walk);
382 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
383
384 kernel_fpu_begin();
385 while ((nbytes = walk.nbytes)) {
386 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
387 nbytes & AES_BLOCK_MASK);
388 nbytes &= AES_BLOCK_SIZE - 1;
389 err = blkcipher_walk_done(desc, &walk, nbytes);
390 }
391 kernel_fpu_end();
392
393 return err;
394}
395
396static int ecb_decrypt(struct blkcipher_desc *desc,
397 struct scatterlist *dst, struct scatterlist *src,
398 unsigned int nbytes)
399{
400 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
401 struct blkcipher_walk walk;
402 int err;
403
404 blkcipher_walk_init(&walk, dst, src, nbytes);
405 err = blkcipher_walk_virt(desc, &walk);
406 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
407
408 kernel_fpu_begin();
409 while ((nbytes = walk.nbytes)) {
410 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
411 nbytes & AES_BLOCK_MASK);
412 nbytes &= AES_BLOCK_SIZE - 1;
413 err = blkcipher_walk_done(desc, &walk, nbytes);
414 }
415 kernel_fpu_end();
416
417 return err;
418}
419
420static int cbc_encrypt(struct blkcipher_desc *desc,
421 struct scatterlist *dst, struct scatterlist *src,
422 unsigned int nbytes)
423{
424 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
425 struct blkcipher_walk walk;
426 int err;
427
428 blkcipher_walk_init(&walk, dst, src, nbytes);
429 err = blkcipher_walk_virt(desc, &walk);
430 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
431
432 kernel_fpu_begin();
433 while ((nbytes = walk.nbytes)) {
434 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
435 nbytes & AES_BLOCK_MASK, walk.iv);
436 nbytes &= AES_BLOCK_SIZE - 1;
437 err = blkcipher_walk_done(desc, &walk, nbytes);
438 }
439 kernel_fpu_end();
440
441 return err;
442}
443
444static int cbc_decrypt(struct blkcipher_desc *desc,
445 struct scatterlist *dst, struct scatterlist *src,
446 unsigned int nbytes)
447{
448 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
449 struct blkcipher_walk walk;
450 int err;
451
452 blkcipher_walk_init(&walk, dst, src, nbytes);
453 err = blkcipher_walk_virt(desc, &walk);
454 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
455
456 kernel_fpu_begin();
457 while ((nbytes = walk.nbytes)) {
458 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
459 nbytes & AES_BLOCK_MASK, walk.iv);
460 nbytes &= AES_BLOCK_SIZE - 1;
461 err = blkcipher_walk_done(desc, &walk, nbytes);
462 }
463 kernel_fpu_end();
464
465 return err;
466}
467
468#ifdef CONFIG_X86_64
469static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
470 struct blkcipher_walk *walk)
471{
472 u8 *ctrblk = walk->iv;
473 u8 keystream[AES_BLOCK_SIZE];
474 u8 *src = walk->src.virt.addr;
475 u8 *dst = walk->dst.virt.addr;
476 unsigned int nbytes = walk->nbytes;
477
478 aesni_enc(ctx, keystream, ctrblk);
479 crypto_xor(keystream, src, nbytes);
480 memcpy(dst, keystream, nbytes);
481 crypto_inc(ctrblk, AES_BLOCK_SIZE);
482}
483
484#ifdef CONFIG_AS_AVX
485static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
486 const u8 *in, unsigned int len, u8 *iv)
487{
488 /*
489 * based on key length, override with the by8 version
490 * of ctr mode encryption/decryption for improved performance
491 * aes_set_key_common() ensures that key length is one of
492 * {128,192,256}
493 */
494 if (ctx->key_length == AES_KEYSIZE_128)
495 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
496 else if (ctx->key_length == AES_KEYSIZE_192)
497 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
498 else
499 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
500}
501#endif
502
503static int ctr_crypt(struct blkcipher_desc *desc,
504 struct scatterlist *dst, struct scatterlist *src,
505 unsigned int nbytes)
506{
507 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
508 struct blkcipher_walk walk;
509 int err;
510
511 blkcipher_walk_init(&walk, dst, src, nbytes);
512 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
513 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
514
515 kernel_fpu_begin();
516 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
517 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
518 nbytes & AES_BLOCK_MASK, walk.iv);
519 nbytes &= AES_BLOCK_SIZE - 1;
520 err = blkcipher_walk_done(desc, &walk, nbytes);
521 }
522 if (walk.nbytes) {
523 ctr_crypt_final(ctx, &walk);
524 err = blkcipher_walk_done(desc, &walk, 0);
525 }
526 kernel_fpu_end();
527
528 return err;
529}
530#endif
531
532static int ablk_ecb_init(struct crypto_tfm *tfm)
533{
534 return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
535}
536
537static int ablk_cbc_init(struct crypto_tfm *tfm)
538{
539 return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
540}
541
542#ifdef CONFIG_X86_64
543static int ablk_ctr_init(struct crypto_tfm *tfm)
544{
545 return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
546}
547
548#endif
549
550#ifdef HAS_PCBC
551static int ablk_pcbc_init(struct crypto_tfm *tfm)
552{
553 return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
554}
555#endif
556
557static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
558{
559 aesni_ecb_enc(ctx, blks, blks, nbytes);
560}
561
562static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
563{
564 aesni_ecb_dec(ctx, blks, blks, nbytes);
565}
566
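/*
 * For lrw(aes) the supplied key is the AES key followed by an extra
 * AES_BLOCK_SIZE bytes that seed the LRW tweak multiplication table.
 */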
567static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
568 unsigned int keylen)
569{
570 struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
571 int err;
572
573 err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
574 keylen - AES_BLOCK_SIZE);
575 if (err)
576 return err;
577
578 return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
579}
580
581static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
582{
583 struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
584
585 lrw_free_table(&ctx->lrw_table);
586}
587
588static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
589 struct scatterlist *src, unsigned int nbytes)
590{
591 struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
592 be128 buf[8];
593 struct lrw_crypt_req req = {
594 .tbuf = buf,
595 .tbuflen = sizeof(buf),
596
597 .table_ctx = &ctx->lrw_table,
598 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
599 .crypt_fn = lrw_xts_encrypt_callback,
600 };
601 int ret;
602
603 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
604
605 kernel_fpu_begin();
606 ret = lrw_crypt(desc, dst, src, nbytes, &req);
607 kernel_fpu_end();
608
609 return ret;
610}
611
612static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
613 struct scatterlist *src, unsigned int nbytes)
614{
615 struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
616 be128 buf[8];
617 struct lrw_crypt_req req = {
618 .tbuf = buf,
619 .tbuflen = sizeof(buf),
620
621 .table_ctx = &ctx->lrw_table,
622 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
623 .crypt_fn = lrw_xts_decrypt_callback,
624 };
625 int ret;
626
627 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
628
629 kernel_fpu_begin();
630 ret = lrw_crypt(desc, dst, src, nbytes, &req);
631 kernel_fpu_end();
632
633 return ret;
634}
635
636static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
637 unsigned int keylen)
638{
639 struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
640 u32 *flags = &tfm->crt_flags;
641 int err;
642
643 /* key consists of keys of equal size concatenated, therefore
644 * the length must be even
645 */
646 if (keylen % 2) {
647 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
648 return -EINVAL;
649 }
650
651 /* first half of xts-key is for crypt */
652 err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
653 if (err)
654 return err;
655
656 /* second half of xts-key is for tweak */
657 return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
658 keylen / 2);
659}
660
661
662static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
663{
664 aesni_enc(ctx, out, in);
665}
666
667#ifdef CONFIG_X86_64
668
669static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
670{
671 glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
672}
673
674static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
675{
676 glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
677}
678
679static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
680{
681 aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
682}
683
684static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
685{
686 aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
687}
688
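/*
 * Dispatch tables for the XTS glue helper: use the eight-block assembly
 * routine while at least eight blocks remain, then fall back to the
 * single-block functions for the tail.
 */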
689static const struct common_glue_ctx aesni_enc_xts = {
690 .num_funcs = 2,
691 .fpu_blocks_limit = 1,
692
693 .funcs = { {
694 .num_blocks = 8,
695 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
696 }, {
697 .num_blocks = 1,
698 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
699 } }
700};
701
702static const struct common_glue_ctx aesni_dec_xts = {
703 .num_funcs = 2,
704 .fpu_blocks_limit = 1,
705
706 .funcs = { {
707 .num_blocks = 8,
708 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
709 }, {
710 .num_blocks = 1,
711 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
712 } }
713};
714
715static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
716 struct scatterlist *src, unsigned int nbytes)
717{
718 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
719
720 return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
721 XTS_TWEAK_CAST(aesni_xts_tweak),
722 aes_ctx(ctx->raw_tweak_ctx),
723 aes_ctx(ctx->raw_crypt_ctx));
724}
725
726static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
727 struct scatterlist *src, unsigned int nbytes)
728{
729 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
730
731 return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
732 XTS_TWEAK_CAST(aesni_xts_tweak),
733 aes_ctx(ctx->raw_tweak_ctx),
734 aes_ctx(ctx->raw_crypt_ctx));
735}
736
737#else
738
739static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
740 struct scatterlist *src, unsigned int nbytes)
741{
742 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
743 be128 buf[8];
744 struct xts_crypt_req req = {
745 .tbuf = buf,
746 .tbuflen = sizeof(buf),
747
748 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
749 .tweak_fn = aesni_xts_tweak,
750 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
751 .crypt_fn = lrw_xts_encrypt_callback,
752 };
753 int ret;
754
755 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
756
757 kernel_fpu_begin();
758 ret = xts_crypt(desc, dst, src, nbytes, &req);
759 kernel_fpu_end();
760
761 return ret;
762}
763
764static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
765 struct scatterlist *src, unsigned int nbytes)
766{
767 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
768 be128 buf[8];
769 struct xts_crypt_req req = {
770 .tbuf = buf,
771 .tbuflen = sizeof(buf),
772
773 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
774 .tweak_fn = aesni_xts_tweak,
775 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
776 .crypt_fn = lrw_xts_decrypt_callback,
777 };
778 int ret;
779
780 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
781
782 kernel_fpu_begin();
783 ret = xts_crypt(desc, dst, src, nbytes, &req);
784 kernel_fpu_end();
785
786 return ret;
787}
788
789#endif
790
791#ifdef CONFIG_X86_64
792static int rfc4106_init(struct crypto_tfm *tfm)
793{
794 struct cryptd_aead *cryptd_tfm;
795 struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
796 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
797 struct crypto_aead *cryptd_child;
798 struct aesni_rfc4106_gcm_ctx *child_ctx;
799 cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
800 if (IS_ERR(cryptd_tfm))
801 return PTR_ERR(cryptd_tfm);
802
803 cryptd_child = cryptd_aead_child(cryptd_tfm);
804 child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
805 memcpy(child_ctx, ctx, sizeof(*ctx));
806 ctx->cryptd_tfm = cryptd_tfm;
807 tfm->crt_aead.reqsize = sizeof(struct aead_request)
808 + crypto_aead_reqsize(&cryptd_tfm->base);
809 return 0;
810}
811
812static void rfc4106_exit(struct crypto_tfm *tfm)
813{
814 struct aesni_rfc4106_gcm_ctx *ctx =
815 (struct aesni_rfc4106_gcm_ctx *)
816 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
817 if (!IS_ERR(ctx->cryptd_tfm))
818 cryptd_free_aead(ctx->cryptd_tfm);
819 return;
820}
821
822static void
823rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
824{
825 struct aesni_gcm_set_hash_subkey_result *result = req->data;
826
827 if (err == -EINPROGRESS)
828 return;
829 result->err = err;
830 complete(&result->completion);
831}
832
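/*
 * The GHASH subkey is AES-K(0^128).  It is generated here by running a
 * one-block ctr(aes) request over an all-zero buffer with an all-zero IV;
 * the request may complete asynchronously, hence the completion handling.
 */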
833static int
834rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
835{
836 struct crypto_ablkcipher *ctr_tfm;
837 struct ablkcipher_request *req;
838 int ret = -EINVAL;
839 struct aesni_hash_subkey_req_data *req_data;
840
841 ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
842 if (IS_ERR(ctr_tfm))
843 return PTR_ERR(ctr_tfm);
844
845 crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
846
847 ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
848 if (ret)
849 goto out_free_ablkcipher;
850
851 ret = -ENOMEM;
852 req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
853 if (!req)
854 goto out_free_ablkcipher;
855
856 req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
857 if (!req_data)
858 goto out_free_request;
859
860 memset(req_data->iv, 0, sizeof(req_data->iv));
861
862 /* Clear the data in the hash sub key container to zero.*/
863 /* We want to cipher all zeros to create the hash sub key. */
864 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
865
866 init_completion(&req_data->result.completion);
867 sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
868 ablkcipher_request_set_tfm(req, ctr_tfm);
869 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
870 CRYPTO_TFM_REQ_MAY_BACKLOG,
871 rfc4106_set_hash_subkey_done,
872 &req_data->result);
873
874 ablkcipher_request_set_crypt(req, &req_data->sg,
875 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
876
877 ret = crypto_ablkcipher_encrypt(req);
878 if (ret == -EINPROGRESS || ret == -EBUSY) {
879 ret = wait_for_completion_interruptible
880 (&req_data->result.completion);
881 if (!ret)
882 ret = req_data->result.err;
883 }
884 kfree(req_data);
885out_free_request:
886 ablkcipher_request_free(req);
887out_free_ablkcipher:
888 crypto_free_ablkcipher(ctr_tfm);
889 return ret;
890}
891
892static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
893 unsigned int key_len)
894{
895 int ret = 0;
896 struct crypto_tfm *tfm = crypto_aead_tfm(parent);
897 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
898 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
899 struct aesni_rfc4106_gcm_ctx *child_ctx =
900 aesni_rfc4106_gcm_ctx_get(cryptd_child);
901 u8 *new_key_align, *new_key_mem = NULL;
902
903 if (key_len < 4) {
904 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
905 return -EINVAL;
906 }
907 /*Account for 4 byte nonce at the end.*/
908 key_len -= 4;
909 if (key_len != AES_KEYSIZE_128) {
910 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
911 return -EINVAL;
912 }
913
914 memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
915 /*This must be on a 16 byte boundary!*/
916 if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
917 return -EINVAL;
918
919 if ((unsigned long)key % AESNI_ALIGN) {
920 /* key is not aligned: use an auxiliary aligned pointer */
921 new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
922 if (!new_key_mem)
923 return -ENOMEM;
924
925 new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
926 memcpy(new_key_align, key, key_len);
927 key = new_key_align;
928 }
929
930 if (!irq_fpu_usable())
931 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
932 key, key_len);
933 else {
934 kernel_fpu_begin();
935 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
936 kernel_fpu_end();
937 }
938 /*This must be on a 16 byte boundary!*/
939 if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
940 ret = -EINVAL;
941 goto exit;
942 }
943 ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
944 memcpy(child_ctx, ctx, sizeof(*ctx));
945exit:
946 kfree(new_key_mem);
947 return ret;
948}
949
950/* This is the Integrity Check Value (aka the authentication tag) length and can
951 * be 8, 12 or 16 bytes long. */
952static int rfc4106_set_authsize(struct crypto_aead *parent,
953 unsigned int authsize)
954{
955 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
956 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
957
958 switch (authsize) {
959 case 8:
960 case 12:
961 case 16:
962 break;
963 default:
964 return -EINVAL;
965 }
966 crypto_aead_crt(parent)->authsize = authsize;
967 crypto_aead_crt(cryptd_child)->authsize = authsize;
968 return 0;
969}
970
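/*
 * If the FPU cannot be used in the current context, bounce the request to
 * the cryptd workqueue and let it run there; otherwise call straight into
 * the AES-NI code under kernel_fpu_begin()/end().
 */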
971static int rfc4106_encrypt(struct aead_request *req)
972{
973 int ret;
974 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
975 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
976
977 if (!irq_fpu_usable()) {
978 struct aead_request *cryptd_req =
979 (struct aead_request *) aead_request_ctx(req);
980 memcpy(cryptd_req, req, sizeof(*req));
981 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
982 return crypto_aead_encrypt(cryptd_req);
983 } else {
984 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
985 kernel_fpu_begin();
986 ret = cryptd_child->base.crt_aead.encrypt(req);
987 kernel_fpu_end();
988 return ret;
989 }
990}
991
992static int rfc4106_decrypt(struct aead_request *req)
993{
994 int ret;
995 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
996 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
997
998 if (!irq_fpu_usable()) {
999 struct aead_request *cryptd_req =
1000 (struct aead_request *) aead_request_ctx(req);
1001 memcpy(cryptd_req, req, sizeof(*req));
1002 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1003 return crypto_aead_decrypt(cryptd_req);
1004 } else {
1005 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1006 kernel_fpu_begin();
1007 ret = cryptd_child->base.crt_aead.decrypt(req);
1008 kernel_fpu_end();
1009 return ret;
1010 }
1011}
1012
1013static int __driver_rfc4106_encrypt(struct aead_request *req)
1014{
1015 u8 one_entry_in_sg = 0;
1016 u8 *src, *dst, *assoc;
1017 __be32 counter = cpu_to_be32(1);
1018 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1019 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1020 void *aes_ctx = &(ctx->aes_key_expanded);
1021 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1022 u8 iv_tab[16+AESNI_ALIGN];
1023 u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
1024 struct scatter_walk src_sg_walk;
1025 struct scatter_walk assoc_sg_walk;
1026 struct scatter_walk dst_sg_walk;
1027 unsigned int i;
1028
1029 /* Assuming we are supporting rfc4106 64-bit extended */
1030 /* sequence numbers, we need to have the AAD length equal */
1031 /* to 8 or 12 bytes */
1032 if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1033 return -EINVAL;
1034 /* Build the pre-counter block j0: 4-byte salt, 8-byte IV, then 0x00000001. */
1035 for (i = 0; i < 4; i++)
1036 *(iv+i) = ctx->nonce[i];
1037 for (i = 0; i < 8; i++)
1038 *(iv+4+i) = req->iv[i];
1039 *((__be32 *)(iv+12)) = counter;
1040
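	/*
	 * Fast path: when src, dst and assoc each occupy a single scatterlist
	 * entry they can be mapped directly; otherwise the data is copied
	 * into a temporary linear buffer and copied back afterwards.
	 */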
1041 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1042 one_entry_in_sg = 1;
1043 scatterwalk_start(&src_sg_walk, req->src);
1044 scatterwalk_start(&assoc_sg_walk, req->assoc);
1045 src = scatterwalk_map(&src_sg_walk);
1046 assoc = scatterwalk_map(&assoc_sg_walk);
1047 dst = src;
1048 if (unlikely(req->src != req->dst)) {
1049 scatterwalk_start(&dst_sg_walk, req->dst);
1050 dst = scatterwalk_map(&dst_sg_walk);
1051 }
1052
1053 } else {
1054 /* Allocate memory for src, dst, assoc */
1055 src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1056 GFP_ATOMIC);
1057 if (unlikely(!src))
1058 return -ENOMEM;
1059 assoc = (src + req->cryptlen + auth_tag_len);
1060 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1061 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1062 req->assoclen, 0);
1063 dst = src;
1064 }
1065
1066 aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
1067 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1068 + ((unsigned long)req->cryptlen), auth_tag_len);
1069
1070 /* The authTag (aka the Integrity Check Value) needs to be written
1071 * back to the packet. */
1072 if (one_entry_in_sg) {
1073 if (unlikely(req->src != req->dst)) {
1074 scatterwalk_unmap(dst);
1075 scatterwalk_done(&dst_sg_walk, 0, 0);
1076 }
1077 scatterwalk_unmap(src);
1078 scatterwalk_unmap(assoc);
1079 scatterwalk_done(&src_sg_walk, 0, 0);
1080 scatterwalk_done(&assoc_sg_walk, 0, 0);
1081 } else {
1082 scatterwalk_map_and_copy(dst, req->dst, 0,
1083 req->cryptlen + auth_tag_len, 1);
1084 kfree(src);
1085 }
1086 return 0;
1087}
1088
1089static int __driver_rfc4106_decrypt(struct aead_request *req)
1090{
1091 u8 one_entry_in_sg = 0;
1092 u8 *src, *dst, *assoc;
1093 unsigned long tempCipherLen = 0;
1094 __be32 counter = cpu_to_be32(1);
1095 int retval = 0;
1096 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1097 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1098 void *aes_ctx = &(ctx->aes_key_expanded);
1099 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1100 u8 iv_and_authTag[32+AESNI_ALIGN];
1101 u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1102 u8 *authTag = iv + 16;
1103 struct scatter_walk src_sg_walk;
1104 struct scatter_walk assoc_sg_walk;
1105 struct scatter_walk dst_sg_walk;
1106 unsigned int i;
1107
1108 if (unlikely((req->cryptlen < auth_tag_len) ||
1109 (req->assoclen != 8 && req->assoclen != 12)))
1110 return -EINVAL;
1111 /* Assuming we are supporting rfc4106 64-bit extended */
1112 /* sequence numbers, we need to have the AAD length */
1113 /* equal to 8 or 12 bytes */
1114
1115 tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1116 /* Build the pre-counter block j0: 4-byte salt, 8-byte IV, then 0x00000001. */
1117 for (i = 0; i < 4; i++)
1118 *(iv+i) = ctx->nonce[i];
1119 for (i = 0; i < 8; i++)
1120 *(iv+4+i) = req->iv[i];
1121 *((__be32 *)(iv+12)) = counter;
1122
1123 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1124 one_entry_in_sg = 1;
1125 scatterwalk_start(&src_sg_walk, req->src);
1126 scatterwalk_start(&assoc_sg_walk, req->assoc);
1127 src = scatterwalk_map(&src_sg_walk);
1128 assoc = scatterwalk_map(&assoc_sg_walk);
1129 dst = src;
1130 if (unlikely(req->src != req->dst)) {
1131 scatterwalk_start(&dst_sg_walk, req->dst);
1132 dst = scatterwalk_map(&dst_sg_walk);
1133 }
1134
1135 } else {
1136 /* Allocate memory for src, dst, assoc */
1137 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1138 if (!src)
1139 return -ENOMEM;
1140 assoc = (src + req->cryptlen);
1141 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1142 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1143 req->assoclen, 0);
1144 dst = src;
1145 }
1146
1147 aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
1148 ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1149 authTag, auth_tag_len);
1150
1151 /* Compare generated tag with passed in tag. */
1152 retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
1153 -EBADMSG : 0;
1154
1155 if (one_entry_in_sg) {
1156 if (unlikely(req->src != req->dst)) {
1157 scatterwalk_unmap(dst);
1158 scatterwalk_done(&dst_sg_walk, 0, 0);
1159 }
1160 scatterwalk_unmap(src);
1161 scatterwalk_unmap(assoc);
1162 scatterwalk_done(&src_sg_walk, 0, 0);
1163 scatterwalk_done(&assoc_sg_walk, 0, 0);
1164 } else {
1165 scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
1166 kfree(src);
1167 }
1168 return retval;
1169}
1170#endif
1171
1172static struct crypto_alg aesni_algs[] = { {
1173 .cra_name = "aes",
1174 .cra_driver_name = "aes-aesni",
1175 .cra_priority = 300,
1176 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
1177 .cra_blocksize = AES_BLOCK_SIZE,
1178 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1179 AESNI_ALIGN - 1,
1180 .cra_alignmask = 0,
1181 .cra_module = THIS_MODULE,
1182 .cra_u = {
1183 .cipher = {
1184 .cia_min_keysize = AES_MIN_KEY_SIZE,
1185 .cia_max_keysize = AES_MAX_KEY_SIZE,
1186 .cia_setkey = aes_set_key,
1187 .cia_encrypt = aes_encrypt,
1188 .cia_decrypt = aes_decrypt
1189 }
1190 }
1191}, {
1192 .cra_name = "__aes-aesni",
1193 .cra_driver_name = "__driver-aes-aesni",
1194 .cra_priority = 0,
1195 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
1196 .cra_blocksize = AES_BLOCK_SIZE,
1197 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1198 AESNI_ALIGN - 1,
1199 .cra_alignmask = 0,
1200 .cra_module = THIS_MODULE,
1201 .cra_u = {
1202 .cipher = {
1203 .cia_min_keysize = AES_MIN_KEY_SIZE,
1204 .cia_max_keysize = AES_MAX_KEY_SIZE,
1205 .cia_setkey = aes_set_key,
1206 .cia_encrypt = __aes_encrypt,
1207 .cia_decrypt = __aes_decrypt
1208 }
1209 }
1210}, {
1211 .cra_name = "__ecb-aes-aesni",
1212 .cra_driver_name = "__driver-ecb-aes-aesni",
1213 .cra_priority = 0,
1214 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1215 .cra_blocksize = AES_BLOCK_SIZE,
1216 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1217 AESNI_ALIGN - 1,
1218 .cra_alignmask = 0,
1219 .cra_type = &crypto_blkcipher_type,
1220 .cra_module = THIS_MODULE,
1221 .cra_u = {
1222 .blkcipher = {
1223 .min_keysize = AES_MIN_KEY_SIZE,
1224 .max_keysize = AES_MAX_KEY_SIZE,
1225 .setkey = aes_set_key,
1226 .encrypt = ecb_encrypt,
1227 .decrypt = ecb_decrypt,
1228 },
1229 },
1230}, {
1231 .cra_name = "__cbc-aes-aesni",
1232 .cra_driver_name = "__driver-cbc-aes-aesni",
1233 .cra_priority = 0,
1234 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1235 .cra_blocksize = AES_BLOCK_SIZE,
1236 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1237 AESNI_ALIGN - 1,
1238 .cra_alignmask = 0,
1239 .cra_type = &crypto_blkcipher_type,
1240 .cra_module = THIS_MODULE,
1241 .cra_u = {
1242 .blkcipher = {
1243 .min_keysize = AES_MIN_KEY_SIZE,
1244 .max_keysize = AES_MAX_KEY_SIZE,
1245 .setkey = aes_set_key,
1246 .encrypt = cbc_encrypt,
1247 .decrypt = cbc_decrypt,
1248 },
1249 },
1250}, {
1251 .cra_name = "ecb(aes)",
1252 .cra_driver_name = "ecb-aes-aesni",
1253 .cra_priority = 400,
1254 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1255 .cra_blocksize = AES_BLOCK_SIZE,
1256 .cra_ctxsize = sizeof(struct async_helper_ctx),
1257 .cra_alignmask = 0,
1258 .cra_type = &crypto_ablkcipher_type,
1259 .cra_module = THIS_MODULE,
1260 .cra_init = ablk_ecb_init,
1261 .cra_exit = ablk_exit,
1262 .cra_u = {
1263 .ablkcipher = {
1264 .min_keysize = AES_MIN_KEY_SIZE,
1265 .max_keysize = AES_MAX_KEY_SIZE,
1266 .setkey = ablk_set_key,
1267 .encrypt = ablk_encrypt,
1268 .decrypt = ablk_decrypt,
1269 },
1270 },
1271}, {
1272 .cra_name = "cbc(aes)",
1273 .cra_driver_name = "cbc-aes-aesni",
1274 .cra_priority = 400,
1275 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1276 .cra_blocksize = AES_BLOCK_SIZE,
1277 .cra_ctxsize = sizeof(struct async_helper_ctx),
1278 .cra_alignmask = 0,
1279 .cra_type = &crypto_ablkcipher_type,
1280 .cra_module = THIS_MODULE,
1281 .cra_init = ablk_cbc_init,
1282 .cra_exit = ablk_exit,
1283 .cra_u = {
1284 .ablkcipher = {
1285 .min_keysize = AES_MIN_KEY_SIZE,
1286 .max_keysize = AES_MAX_KEY_SIZE,
1287 .ivsize = AES_BLOCK_SIZE,
1288 .setkey = ablk_set_key,
1289 .encrypt = ablk_encrypt,
1290 .decrypt = ablk_decrypt,
1291 },
1292 },
1293#ifdef CONFIG_X86_64
1294}, {
1295 .cra_name = "__ctr-aes-aesni",
1296 .cra_driver_name = "__driver-ctr-aes-aesni",
1297 .cra_priority = 0,
1298 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1299 .cra_blocksize = 1,
1300 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1301 AESNI_ALIGN - 1,
1302 .cra_alignmask = 0,
1303 .cra_type = &crypto_blkcipher_type,
1304 .cra_module = THIS_MODULE,
1305 .cra_u = {
1306 .blkcipher = {
1307 .min_keysize = AES_MIN_KEY_SIZE,
1308 .max_keysize = AES_MAX_KEY_SIZE,
1309 .ivsize = AES_BLOCK_SIZE,
1310 .setkey = aes_set_key,
1311 .encrypt = ctr_crypt,
1312 .decrypt = ctr_crypt,
1313 },
1314 },
1315}, {
1316 .cra_name = "ctr(aes)",
1317 .cra_driver_name = "ctr-aes-aesni",
1318 .cra_priority = 400,
1319 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1320 .cra_blocksize = 1,
1321 .cra_ctxsize = sizeof(struct async_helper_ctx),
1322 .cra_alignmask = 0,
1323 .cra_type = &crypto_ablkcipher_type,
1324 .cra_module = THIS_MODULE,
1325 .cra_init = ablk_ctr_init,
1326 .cra_exit = ablk_exit,
1327 .cra_u = {
1328 .ablkcipher = {
1329 .min_keysize = AES_MIN_KEY_SIZE,
1330 .max_keysize = AES_MAX_KEY_SIZE,
1331 .ivsize = AES_BLOCK_SIZE,
1332 .setkey = ablk_set_key,
1333 .encrypt = ablk_encrypt,
1334 .decrypt = ablk_decrypt,
1335 .geniv = "chainiv",
1336 },
1337 },
1338}, {
1339 .cra_name = "__gcm-aes-aesni",
1340 .cra_driver_name = "__driver-gcm-aes-aesni",
1341 .cra_priority = 0,
1342 .cra_flags = CRYPTO_ALG_TYPE_AEAD,
1343 .cra_blocksize = 1,
1344 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
1345 AESNI_ALIGN,
1346 .cra_alignmask = 0,
1347 .cra_type = &crypto_aead_type,
1348 .cra_module = THIS_MODULE,
1349 .cra_u = {
1350 .aead = {
1351 .encrypt = __driver_rfc4106_encrypt,
1352 .decrypt = __driver_rfc4106_decrypt,
1353 },
1354 },
1355}, {
1356 .cra_name = "rfc4106(gcm(aes))",
1357 .cra_driver_name = "rfc4106-gcm-aesni",
1358 .cra_priority = 400,
1359 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1360 .cra_blocksize = 1,
1361 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
1362 AESNI_ALIGN,
1363 .cra_alignmask = 0,
1364 .cra_type = &crypto_nivaead_type,
1365 .cra_module = THIS_MODULE,
1366 .cra_init = rfc4106_init,
1367 .cra_exit = rfc4106_exit,
1368 .cra_u = {
1369 .aead = {
1370 .setkey = rfc4106_set_key,
1371 .setauthsize = rfc4106_set_authsize,
1372 .encrypt = rfc4106_encrypt,
1373 .decrypt = rfc4106_decrypt,
1374 .geniv = "seqiv",
1375 .ivsize = 8,
1376 .maxauthsize = 16,
1377 },
1378 },
1379#endif
1380#ifdef HAS_PCBC
1381}, {
1382 .cra_name = "pcbc(aes)",
1383 .cra_driver_name = "pcbc-aes-aesni",
1384 .cra_priority = 400,
1385 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1386 .cra_blocksize = AES_BLOCK_SIZE,
1387 .cra_ctxsize = sizeof(struct async_helper_ctx),
1388 .cra_alignmask = 0,
1389 .cra_type = &crypto_ablkcipher_type,
1390 .cra_module = THIS_MODULE,
1391 .cra_init = ablk_pcbc_init,
1392 .cra_exit = ablk_exit,
1393 .cra_u = {
1394 .ablkcipher = {
1395 .min_keysize = AES_MIN_KEY_SIZE,
1396 .max_keysize = AES_MAX_KEY_SIZE,
1397 .ivsize = AES_BLOCK_SIZE,
1398 .setkey = ablk_set_key,
1399 .encrypt = ablk_encrypt,
1400 .decrypt = ablk_decrypt,
1401 },
1402 },
1403#endif
1404}, {
1405 .cra_name = "__lrw-aes-aesni",
1406 .cra_driver_name = "__driver-lrw-aes-aesni",
1407 .cra_priority = 0,
1408 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1409 .cra_blocksize = AES_BLOCK_SIZE,
1410 .cra_ctxsize = sizeof(struct aesni_lrw_ctx),
1411 .cra_alignmask = 0,
1412 .cra_type = &crypto_blkcipher_type,
1413 .cra_module = THIS_MODULE,
1414 .cra_exit = lrw_aesni_exit_tfm,
1415 .cra_u = {
1416 .blkcipher = {
1417 .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1418 .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1419 .ivsize = AES_BLOCK_SIZE,
1420 .setkey = lrw_aesni_setkey,
1421 .encrypt = lrw_encrypt,
1422 .decrypt = lrw_decrypt,
1423 },
1424 },
1425}, {
1426 .cra_name = "__xts-aes-aesni",
1427 .cra_driver_name = "__driver-xts-aes-aesni",
1428 .cra_priority = 0,
1429 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1430 .cra_blocksize = AES_BLOCK_SIZE,
1431 .cra_ctxsize = sizeof(struct aesni_xts_ctx),
1432 .cra_alignmask = 0,
1433 .cra_type = &crypto_blkcipher_type,
1434 .cra_module = THIS_MODULE,
1435 .cra_u = {
1436 .blkcipher = {
1437 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1438 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1439 .ivsize = AES_BLOCK_SIZE,
1440 .setkey = xts_aesni_setkey,
1441 .encrypt = xts_encrypt,
1442 .decrypt = xts_decrypt,
1443 },
1444 },
1445}, {
1446 .cra_name = "lrw(aes)",
1447 .cra_driver_name = "lrw-aes-aesni",
1448 .cra_priority = 400,
1449 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1450 .cra_blocksize = AES_BLOCK_SIZE,
1451 .cra_ctxsize = sizeof(struct async_helper_ctx),
1452 .cra_alignmask = 0,
1453 .cra_type = &crypto_ablkcipher_type,
1454 .cra_module = THIS_MODULE,
1455 .cra_init = ablk_init,
1456 .cra_exit = ablk_exit,
1457 .cra_u = {
1458 .ablkcipher = {
1459 .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1460 .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1461 .ivsize = AES_BLOCK_SIZE,
1462 .setkey = ablk_set_key,
1463 .encrypt = ablk_encrypt,
1464 .decrypt = ablk_decrypt,
1465 },
1466 },
1467}, {
1468 .cra_name = "xts(aes)",
1469 .cra_driver_name = "xts-aes-aesni",
1470 .cra_priority = 400,
1471 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1472 .cra_blocksize = AES_BLOCK_SIZE,
1473 .cra_ctxsize = sizeof(struct async_helper_ctx),
1474 .cra_alignmask = 0,
1475 .cra_type = &crypto_ablkcipher_type,
1476 .cra_module = THIS_MODULE,
1477 .cra_init = ablk_init,
1478 .cra_exit = ablk_exit,
1479 .cra_u = {
1480 .ablkcipher = {
1481 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1482 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1483 .ivsize = AES_BLOCK_SIZE,
1484 .setkey = ablk_set_key,
1485 .encrypt = ablk_encrypt,
1486 .decrypt = ablk_decrypt,
1487 },
1488 },
1489} };
1490
1491
1492static const struct x86_cpu_id aesni_cpu_id[] = {
1493 X86_FEATURE_MATCH(X86_FEATURE_AES),
1494 {}
1495};
1496MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1497
1498static int __init aesni_init(void)
1499{
1500 int err;
1501
1502 if (!x86_match_cpu(aesni_cpu_id))
1503 return -ENODEV;
1504#ifdef CONFIG_X86_64
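	/*
	 * Bind the GCM (and, further down, CTR) transform pointers once at
	 * load time, choosing the fastest implementation that both the CPU
	 * and the assembler support: AVX2, then AVX, then plain SSE/AES-NI.
	 */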
1505#ifdef CONFIG_AS_AVX2
1506 if (boot_cpu_has(X86_FEATURE_AVX2)) {
1507 pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1508 aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
1509 aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
1510 } else
1511#endif
1512#ifdef CONFIG_AS_AVX
1513 if (boot_cpu_has(X86_FEATURE_AVX)) {
1514 pr_info("AVX version of gcm_enc/dec engaged.\n");
1515 aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
1516 aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
1517 } else
1518#endif
1519 {
1520 pr_info("SSE version of gcm_enc/dec engaged.\n");
1521 aesni_gcm_enc_tfm = aesni_gcm_enc;
1522 aesni_gcm_dec_tfm = aesni_gcm_dec;
1523 }
1524 aesni_ctr_enc_tfm = aesni_ctr_enc;
1525#ifdef CONFIG_AS_AVX
1526 if (cpu_has_avx) {
1527 /* optimize performance of ctr mode encryption transform */
1528 aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1529 pr_info("AES CTR mode by8 optimization enabled\n");
1530 }
1531#endif
1532#endif
1533
1534 err = crypto_fpu_init();
1535 if (err)
1536 return err;
1537
1538 return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1539}
1540
1541static void __exit aesni_exit(void)
1542{
1543 crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1544
1545 crypto_fpu_exit();
1546}
1547
1548module_init(aesni_init);
1549module_exit(aesni_exit);
1550
1551MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1552MODULE_LICENSE("GPL");
1553MODULE_ALIAS("aes");