arch/x86/crypto/twofish_avx_glue.c
/*
 * Glue Code for AVX assembler version of Twofish Cipher
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *	<Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Glue code based on serpent_sse2_glue.c by:
 *  Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

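/*
 * The AVX implementation processes eight 16-byte blocks per call; this
 * constant sizes the batching loops and the temporary buffers below.
 */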
#define TWOFISH_PARALLEL_BLOCKS 8

/* regular block cipher functions from twofish_x86_64 module */
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
				const u8 *src);
asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
				const u8 *src);

/* 3-way parallel cipher functions from twofish_x86_64-3way module */
asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
				       const u8 *src, bool xor);
asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
				     const u8 *src);

static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
					const u8 *src)
{
	__twofish_enc_blk_3way(ctx, dst, src, false);
}

static inline void twofish_enc_blk_3way_xor(struct twofish_ctx *ctx, u8 *dst,
					    const u8 *src)
{
	__twofish_enc_blk_3way(ctx, dst, src, true);
}

/* 8-way parallel cipher functions */
asmlinkage void __twofish_enc_blk_8way(struct twofish_ctx *ctx, u8 *dst,
				       const u8 *src, bool xor);
asmlinkage void twofish_dec_blk_8way(struct twofish_ctx *ctx, u8 *dst,
				     const u8 *src);

static inline void twofish_enc_blk_xway(struct twofish_ctx *ctx, u8 *dst,
					const u8 *src)
{
	__twofish_enc_blk_8way(ctx, dst, src, false);
}

static inline void twofish_enc_blk_xway_xor(struct twofish_ctx *ctx, u8 *dst,
					    const u8 *src)
{
	__twofish_enc_blk_8way(ctx, dst, src, true);
}

static inline void twofish_dec_blk_xway(struct twofish_ctx *ctx, u8 *dst,
					const u8 *src)
{
	twofish_dec_blk_8way(ctx, dst, src);
}

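/*
 * Context for the async (ablkcipher) wrappers: requests that arrive in a
 * context where the FPU cannot be used are deferred to a cryptd worker
 * thread through this handle.
 */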
struct async_twofish_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};

static inline bool twofish_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	if (fpu_enabled)
		return true;

	/* AVX is only used when the chunk to be processed is large enough,
	 * so do not enable the FPU until it is necessary.
	 */
	if (nbytes < TF_BLOCK_SIZE * TWOFISH_PARALLEL_BLOCKS)
		return false;

	kernel_fpu_begin();
	return true;
}

static inline void twofish_fpu_end(bool fpu_enabled)
{
	if (fpu_enabled)
		kernel_fpu_end();
}

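/*
 * Walk the scatterlists and process blocks in place: 8-way AVX batches
 * while at least TWOFISH_PARALLEL_BLOCKS blocks remain, then 3-way
 * batches, then one block at a time.  Sleeping is disallowed during the
 * walk because kernel_fpu_begin() may be held across iterations.
 */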
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     bool enc)
{
	bool fpu_enabled = false;
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = twofish_fpu_begin(fpu_enabled, nbytes);

		/* Process multi-block batch */
		if (nbytes >= bsize * TWOFISH_PARALLEL_BLOCKS) {
			do {
				if (enc)
					twofish_enc_blk_xway(ctx, wdst, wsrc);
				else
					twofish_dec_blk_xway(ctx, wdst, wsrc);

				wsrc += bsize * TWOFISH_PARALLEL_BLOCKS;
				wdst += bsize * TWOFISH_PARALLEL_BLOCKS;
				nbytes -= bsize * TWOFISH_PARALLEL_BLOCKS;
			} while (nbytes >= bsize * TWOFISH_PARALLEL_BLOCKS);

			if (nbytes < bsize)
				goto done;
		}

		/* Process three block batch */
		if (nbytes >= bsize * 3) {
			do {
				if (enc)
					twofish_enc_blk_3way(ctx, wdst, wsrc);
				else
					twofish_dec_blk_3way(ctx, wdst, wsrc);

				wsrc += bsize * 3;
				wdst += bsize * 3;
				nbytes -= bsize * 3;
			} while (nbytes >= bsize * 3);

			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers */
		do {
			if (enc)
				twofish_enc_blk(ctx, wdst, wsrc);
			else
				twofish_dec_blk(ctx, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	twofish_fpu_end(fpu_enabled);
	return err;
}

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, true);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, false);
}

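/*
 * CBC encryption is inherently serial (each block's input depends on the
 * previous ciphertext block), so only the one-block assembler routine is
 * used here and the FPU is never touched.
 */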
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		twofish_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	/* the last ciphertext block becomes the chaining value */
	*(u128 *)walk->iv = *iv;
	return nbytes;
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __cbc_encrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

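/*
 * CBC decryption parallelizes: walk the chunk from the last block
 * backwards so each batch can still read the ciphertext blocks it needs
 * for the chaining XOR before they are overwritten (src and dst may
 * alias for in-place operation).
 */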
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ivs[TWOFISH_PARALLEL_BLOCKS - 1];
	u128 last_iv;
	int i;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	/* Process multi-block batch */
	if (nbytes >= bsize * TWOFISH_PARALLEL_BLOCKS) {
		do {
			nbytes -= bsize * (TWOFISH_PARALLEL_BLOCKS - 1);
			src -= TWOFISH_PARALLEL_BLOCKS - 1;
			dst -= TWOFISH_PARALLEL_BLOCKS - 1;

			for (i = 0; i < TWOFISH_PARALLEL_BLOCKS - 1; i++)
				ivs[i] = src[i];

			twofish_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

			for (i = 0; i < TWOFISH_PARALLEL_BLOCKS - 1; i++)
				u128_xor(dst + (i + 1), dst + (i + 1), ivs + i);

			nbytes -= bsize;
			if (nbytes < bsize)
				goto done;

			u128_xor(dst, dst, src - 1);
			src -= 1;
			dst -= 1;
		} while (nbytes >= bsize * TWOFISH_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Process three block batch */
	if (nbytes >= bsize * 3) {
		do {
			nbytes -= bsize * (3 - 1);
			src -= 3 - 1;
			dst -= 3 - 1;

			ivs[0] = src[0];
			ivs[1] = src[1];

			twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);

			u128_xor(dst + 1, dst + 1, ivs + 0);
			u128_xor(dst + 2, dst + 2, ivs + 1);

			nbytes -= bsize;
			if (nbytes < bsize)
				goto done;

			u128_xor(dst, dst, src - 1);
			src -= 1;
			dst -= 1;
		} while (nbytes >= bsize * 3);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	for (;;) {
		twofish_dec_blk(ctx, (u8 *)dst, (u8 *)src);

		nbytes -= bsize;
		if (nbytes < bsize)
			break;

		u128_xor(dst, dst, src - 1);
		src -= 1;
		dst -= 1;
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = twofish_fpu_begin(fpu_enabled, nbytes);
		nbytes = __cbc_decrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	twofish_fpu_end(fpu_enabled);
	return err;
}

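/*
 * CTR mode keeps the counter as a host-endian u128 so it can be
 * incremented cheaply; these helpers convert to and from the big-endian
 * representation that is actually encrypted.
 */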
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
	dst->a = cpu_to_be64(src->a);
	dst->b = cpu_to_be64(src->b);
}

static inline void be128_to_u128(u128 *dst, const be128 *src)
{
	dst->a = be64_to_cpu(src->a);
	dst->b = be64_to_cpu(src->b);
}

static inline void u128_inc(u128 *i)
{
	i->b++;
	if (!i->b)
		i->a++;
}

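/*
 * Handle a final partial block: CTR turns the block cipher into a stream
 * cipher, so the tail is simply XORed with one block of keystream and no
 * padding is needed.
 */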
static void ctr_crypt_final(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *ctrblk = walk->iv;
	u8 keystream[TF_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	twofish_enc_blk(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, TF_BLOCK_SIZE);
}

static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ctrblk;
	be128 ctrblocks[TWOFISH_PARALLEL_BLOCKS];
	int i;

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	if (nbytes >= bsize * TWOFISH_PARALLEL_BLOCKS) {
		do {
			/* create ctrblks for parallel encrypt */
			for (i = 0; i < TWOFISH_PARALLEL_BLOCKS; i++) {
				if (dst != src)
					dst[i] = src[i];

				u128_to_be128(&ctrblocks[i], &ctrblk);
				u128_inc(&ctrblk);
			}

			twofish_enc_blk_xway_xor(ctx, (u8 *)dst,
						 (u8 *)ctrblocks);

			src += TWOFISH_PARALLEL_BLOCKS;
			dst += TWOFISH_PARALLEL_BLOCKS;
			nbytes -= bsize * TWOFISH_PARALLEL_BLOCKS;
		} while (nbytes >= bsize * TWOFISH_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Process three block batch */
	if (nbytes >= bsize * 3) {
		do {
			if (dst != src) {
				dst[0] = src[0];
				dst[1] = src[1];
				dst[2] = src[2];
			}

			/* create ctrblks for parallel encrypt */
			u128_to_be128(&ctrblocks[0], &ctrblk);
			u128_inc(&ctrblk);
			u128_to_be128(&ctrblocks[1], &ctrblk);
			u128_inc(&ctrblk);
			u128_to_be128(&ctrblocks[2], &ctrblk);
			u128_inc(&ctrblk);

			twofish_enc_blk_3way_xor(ctx, (u8 *)dst,
						 (u8 *)ctrblocks);

			src += 3;
			dst += 3;
			nbytes -= bsize * 3;
		} while (nbytes >= bsize * 3);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	do {
		if (dst != src)
			*dst = *src;

		u128_to_be128(&ctrblocks[0], &ctrblk);
		u128_inc(&ctrblk);

		twofish_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
		u128_xor(dst, dst, (u128 *)ctrblocks);

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

done:
	u128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, TF_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes) >= TF_BLOCK_SIZE) {
		fpu_enabled = twofish_fpu_begin(fpu_enabled, nbytes);
		nbytes = __ctr_crypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	twofish_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		ctr_crypt_final(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}

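/*
 * LRW/XTS glue: lrw_crypt()/xts_crypt() apply the tweak and hand the
 * data to these callbacks in chunks of at most TWOFISH_PARALLEL_BLOCKS
 * blocks (the size of the tbuf scratch buffer), so a full-size chunk can
 * go straight to the 8-way routine.
 */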
struct crypt_priv {
	struct twofish_ctx *ctx;
	bool fpu_enabled;
};

static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = TF_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
		twofish_enc_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
		twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst);

	nbytes %= bsize * 3;

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		twofish_enc_blk(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = TF_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
		twofish_dec_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
		twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst);

	nbytes %= bsize * 3;

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		twofish_dec_blk(ctx->ctx, srcdst, srcdst);
}

struct twofish_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct twofish_ctx twofish_ctx;
};

static int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __twofish_setkey(&ctx->twofish_ctx, key,
			       keylen - TF_BLOCK_SIZE, &tfm->crt_flags);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen -
			      TF_BLOCK_SIZE);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[TWOFISH_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->twofish_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	twofish_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[TWOFISH_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->twofish_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	twofish_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

struct twofish_xts_ctx {
	struct twofish_ctx tweak_ctx;
	struct twofish_ctx crypt_ctx;
};

static int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* the key consists of two keys of equal size concatenated,
	 * therefore the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __twofish_setkey(&ctx->tweak_ctx,
				key + keylen / 2, keylen / 2, flags);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[TWOFISH_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	twofish_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[TWOFISH_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	twofish_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

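/*
 * Async front-ends: when the FPU is usable in the caller's context the
 * request is handled synchronously through the underlying "__driver-"
 * blkcipher; otherwise it is copied and queued to a cryptd worker, which
 * runs in process context where the FPU is available.
 */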
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_twofish_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}

static int __ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_twofish_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct blkcipher_desc desc;

	desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
	desc.info = req->info;
	desc.flags = 0;

	return crypto_blkcipher_crt(desc.tfm)->encrypt(
		&desc, req->dst, req->src, req->nbytes);
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_twofish_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);

		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		return __ablk_encrypt(req);
	}
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_twofish_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);

		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;

		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;

		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_twofish_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

static int ablk_init(struct crypto_tfm *tfm)
{
	struct async_twofish_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_ablkcipher *cryptd_tfm;
	char drv_name[CRYPTO_MAX_ALG_NAME];

	snprintf(drv_name, sizeof(drv_name), "__driver-%s",
		 crypto_tfm_alg_driver_name(tfm));

	cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);

	return 0;
}

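/*
 * Ten registrations: five internal, priority-0 "__driver-" blkciphers
 * that do the real work, and five async front-ends at priority 400 that
 * the crypto API selects ahead of the generic C implementations.  Note
 * that cbc(twofish) encryption never uses the FPU and therefore calls
 * __ablk_encrypt directly, and that ctr(twofish) uses the encrypt path
 * for both directions since CTR is symmetric.
 */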
static struct crypto_alg twofish_algs[10] = { {
	.cra_name		= "__ecb-twofish-avx",
	.cra_driver_name	= "__driver-ecb-twofish-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(twofish_algs[0].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-twofish-avx",
	.cra_driver_name	= "__driver-cbc-twofish-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(twofish_algs[1].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "__ctr-twofish-avx",
	.cra_driver_name	= "__driver-ctr-twofish-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(twofish_algs[2].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "__lrw-twofish-avx",
	.cra_driver_name	= "__driver-lrw-twofish-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(twofish_algs[3].cra_list),
	.cra_exit		= lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE +
					  TF_BLOCK_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE +
					  TF_BLOCK_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= lrw_twofish_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-twofish-avx",
	.cra_driver_name	= "__driver-xts-twofish-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(twofish_algs[4].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE * 2,
			.max_keysize	= TF_MAX_KEY_SIZE * 2,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= xts_twofish_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	.cra_name		= "ecb(twofish)",
	.cra_driver_name	= "ecb-twofish-avx",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(twofish_algs[5].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(twofish)",
	.cra_driver_name	= "cbc-twofish-avx",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(twofish_algs[6].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= __ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "ctr(twofish)",
	.cra_driver_name	= "ctr-twofish-avx",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(twofish_algs[7].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
}, {
	.cra_name		= "lrw(twofish)",
	.cra_driver_name	= "lrw-twofish-avx",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(twofish_algs[8].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE +
					  TF_BLOCK_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE +
					  TF_BLOCK_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(twofish)",
	.cra_driver_name	= "xts-twofish-avx",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(twofish_algs[9].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE * 2,
			.max_keysize	= TF_MAX_KEY_SIZE * 2,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };

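/*
 * Check at load time that the CPU supports AVX and OSXSAVE and that the
 * OS has enabled both SSE and YMM state saving in XCR0; otherwise the
 * 8-way assembler paths cannot be used at all.
 */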
static int __init twofish_init(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave) {
		printk(KERN_INFO "AVX instructions are not detected.\n");
		return -ENODEV;
	}

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		printk(KERN_INFO "AVX detected but unusable.\n");
		return -ENODEV;
	}

	return crypto_register_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
}

static void __exit twofish_exit(void)
{
	crypto_unregister_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
}

module_init(twofish_init);
module_exit(twofish_exit);

MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("twofish");