crypto: twofish-x86_64-3way - remove unneeded LRW/XTS #ifdefs
[deliverable/linux.git] / arch / x86 / crypto / twofish_glue_3way.c
1 /*
2 * Glue Code for 3-way parallel assembler optimized version of Twofish
3 *
4 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
5 *
6 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
7 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
8 * CTR part based on code (crypto/ctr.c) by:
9 * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
24 * USA
25 *
26 */
27
28 #include <linux/crypto.h>
29 #include <linux/init.h>
30 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <crypto/algapi.h>
33 #include <crypto/twofish.h>
34 #include <crypto/b128ops.h>
35 #include <crypto/lrw.h>
36 #include <crypto/xts.h>
37
38 /* regular block cipher functions from twofish_x86_64 module */
39 asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
40 const u8 *src);
41 asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
42 const u8 *src);
43
44 /* 3-way parallel cipher functions */
45 asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
46 const u8 *src, bool xor);
47 asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
48 const u8 *src);
49
/* Encrypt three consecutive blocks in parallel (result overwrites dst). */
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
					const u8 *src)
{
	__twofish_enc_blk_3way(ctx, dst, src, false);
}
55
/*
 * Encrypt three blocks in parallel and XOR the result into dst
 * (used by the CTR path: dst already holds the plaintext).
 */
static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst,
					    const u8 *src)
{
	__twofish_enc_blk_3way(ctx, dst, src, true);
}
61
62 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
63 void (*fn)(struct twofish_ctx *, u8 *, const u8 *),
64 void (*fn_3way)(struct twofish_ctx *, u8 *, const u8 *))
65 {
66 struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
67 unsigned int bsize = TF_BLOCK_SIZE;
68 unsigned int nbytes;
69 int err;
70
71 err = blkcipher_walk_virt(desc, walk);
72
73 while ((nbytes = walk->nbytes)) {
74 u8 *wsrc = walk->src.virt.addr;
75 u8 *wdst = walk->dst.virt.addr;
76
77 /* Process three block batch */
78 if (nbytes >= bsize * 3) {
79 do {
80 fn_3way(ctx, wdst, wsrc);
81
82 wsrc += bsize * 3;
83 wdst += bsize * 3;
84 nbytes -= bsize * 3;
85 } while (nbytes >= bsize * 3);
86
87 if (nbytes < bsize)
88 goto done;
89 }
90
91 /* Handle leftovers */
92 do {
93 fn(ctx, wdst, wsrc);
94
95 wsrc += bsize;
96 wdst += bsize;
97 nbytes -= bsize;
98 } while (nbytes >= bsize);
99
100 done:
101 err = blkcipher_walk_done(desc, walk, nbytes);
102 }
103
104 return err;
105 }
106
107 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
108 struct scatterlist *src, unsigned int nbytes)
109 {
110 struct blkcipher_walk walk;
111
112 blkcipher_walk_init(&walk, dst, src, nbytes);
113 return ecb_crypt(desc, &walk, twofish_enc_blk, twofish_enc_blk_3way);
114 }
115
116 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
117 struct scatterlist *src, unsigned int nbytes)
118 {
119 struct blkcipher_walk walk;
120
121 blkcipher_walk_init(&walk, dst, src, nbytes);
122 return ecb_crypt(desc, &walk, twofish_dec_blk, twofish_dec_blk_3way);
123 }
124
/* ecb(twofish) backed by the 3-way parallel assembler implementation. */
static struct crypto_alg blk_ecb_alg = {
	.cra_name		= "ecb(twofish)",
	.cra_driver_name	= "ecb-twofish-3way",
	.cra_priority		= 300,	/* above the generic C implementation */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
};
146
147 static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
148 struct blkcipher_walk *walk)
149 {
150 struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
151 unsigned int bsize = TF_BLOCK_SIZE;
152 unsigned int nbytes = walk->nbytes;
153 u128 *src = (u128 *)walk->src.virt.addr;
154 u128 *dst = (u128 *)walk->dst.virt.addr;
155 u128 *iv = (u128 *)walk->iv;
156
157 do {
158 u128_xor(dst, src, iv);
159 twofish_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
160 iv = dst;
161
162 src += 1;
163 dst += 1;
164 nbytes -= bsize;
165 } while (nbytes >= bsize);
166
167 u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv);
168 return nbytes;
169 }
170
171 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
172 struct scatterlist *src, unsigned int nbytes)
173 {
174 struct blkcipher_walk walk;
175 int err;
176
177 blkcipher_walk_init(&walk, dst, src, nbytes);
178 err = blkcipher_walk_virt(desc, &walk);
179
180 while ((nbytes = walk.nbytes)) {
181 nbytes = __cbc_encrypt(desc, &walk);
182 err = blkcipher_walk_done(desc, &walk, nbytes);
183 }
184
185 return err;
186 }
187
/*
 * CBC-decrypt all full blocks of the current walk chunk, walking the
 * buffer back-to-front.  Unlike encryption, CBC decryption parallelizes,
 * so three blocks at a time go through twofish_dec_blk_3way().  Working
 * backwards keeps the previous ciphertext blocks available for the
 * chaining XOR even when src == dst (in-place operation).
 *
 * Returns the number of unprocessed tail bytes (< TF_BLOCK_SIZE).
 */
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ivs[3 - 1];	/* ciphertexts saved before in-place decrypt clobbers them */
	u128 last_iv;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	/* The last ciphertext block becomes the IV for the next chunk. */
	last_iv = *src;

	/* Process three block batch */
	if (nbytes >= bsize * 3) {
		do {
			nbytes -= bsize * (3 - 1);
			src -= 3 - 1;
			dst -= 3 - 1;

			/* Save C[i] and C[i+1]; decrypt may overwrite src. */
			ivs[0] = src[0];
			ivs[1] = src[1];

			twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);

			/* P[i+1] ^= C[i]; P[i+2] ^= C[i+1] */
			u128_xor(dst + 1, dst + 1, ivs + 0);
			u128_xor(dst + 2, dst + 2, ivs + 1);

			nbytes -= bsize;
			if (nbytes < bsize)
				goto done;

			/* P[i] ^= C[i-1], then step back one block. */
			u128_xor(dst, dst, src - 1);
			src -= 1;
			dst -= 1;
		} while (nbytes >= bsize * 3);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	for (;;) {
		twofish_dec_blk(ctx, (u8 *)dst, (u8 *)src);

		nbytes -= bsize;
		if (nbytes < bsize)
			break;

		u128_xor(dst, dst, src - 1);
		src -= 1;
		dst -= 1;
	}

done:
	/* First block of the chunk chains to the incoming IV. */
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}
252
253 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
254 struct scatterlist *src, unsigned int nbytes)
255 {
256 struct blkcipher_walk walk;
257 int err;
258
259 blkcipher_walk_init(&walk, dst, src, nbytes);
260 err = blkcipher_walk_virt(desc, &walk);
261
262 while ((nbytes = walk.nbytes)) {
263 nbytes = __cbc_decrypt(desc, &walk);
264 err = blkcipher_walk_done(desc, &walk, nbytes);
265 }
266
267 return err;
268 }
269
/* cbc(twofish): serial encrypt, 3-way parallel decrypt. */
static struct crypto_alg blk_cbc_alg = {
	.cra_name		= "cbc(twofish)",
	.cra_driver_name	= "cbc-twofish-3way",
	.cra_priority		= 300,	/* above the generic C implementation */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
};
292
/* Convert a host-endian 128-bit value to big-endian representation. */
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
	dst->a = cpu_to_be64(src->a);
	dst->b = cpu_to_be64(src->b);
}
298
/* Convert a big-endian 128-bit value to host-endian representation. */
static inline void be128_to_u128(u128 *dst, const be128 *src)
{
	dst->a = be64_to_cpu(src->a);
	dst->b = be64_to_cpu(src->b);
}
304
305 static inline void u128_inc(u128 *i)
306 {
307 i->b++;
308 if (!i->b)
309 i->a++;
310 }
311
312 static void ctr_crypt_final(struct blkcipher_desc *desc,
313 struct blkcipher_walk *walk)
314 {
315 struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
316 u8 *ctrblk = walk->iv;
317 u8 keystream[TF_BLOCK_SIZE];
318 u8 *src = walk->src.virt.addr;
319 u8 *dst = walk->dst.virt.addr;
320 unsigned int nbytes = walk->nbytes;
321
322 twofish_enc_blk(ctx, keystream, ctrblk);
323 crypto_xor(keystream, src, nbytes);
324 memcpy(dst, keystream, nbytes);
325
326 crypto_inc(ctrblk, TF_BLOCK_SIZE);
327 }
328
/*
 * CTR-mode encrypt/decrypt of all full blocks in the current walk chunk.
 * The counter is kept host-endian in ctrblk for cheap increments and
 * converted to big-endian (ctrblocks[]) right before encryption; the
 * 3-way path XORs the keystream directly onto dst.
 *
 * Returns the number of unprocessed tail bytes (< TF_BLOCK_SIZE).
 */
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ctrblk;			/* host-endian working counter */
	be128 ctrblocks[3];		/* big-endian counters fed to the cipher */

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	/* Process three block batch */
	if (nbytes >= bsize * 3) {
		do {
			/* xor-variant needs the plaintext in dst first */
			if (dst != src) {
				dst[0] = src[0];
				dst[1] = src[1];
				dst[2] = src[2];
			}

			/* create ctrblks for parallel encrypt */
			u128_to_be128(&ctrblocks[0], &ctrblk);
			u128_inc(&ctrblk);
			u128_to_be128(&ctrblocks[1], &ctrblk);
			u128_inc(&ctrblk);
			u128_to_be128(&ctrblocks[2], &ctrblk);
			u128_inc(&ctrblk);

			/* dst[i] ^= E(ctrblocks[i]) */
			twofish_enc_blk_xor_3way(ctx, (u8 *)dst,
						 (u8 *)ctrblocks);

			src += 3;
			dst += 3;
			nbytes -= bsize * 3;
		} while (nbytes >= bsize * 3);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	do {
		if (dst != src)
			*dst = *src;

		u128_to_be128(&ctrblocks[0], &ctrblk);
		u128_inc(&ctrblk);

		/* encrypt counter in place, then XOR onto the data */
		twofish_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
		u128_xor(dst, dst, (u128 *)ctrblocks);

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

done:
	/* Write the advanced counter back for the next chunk. */
	u128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}
391
392 static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
393 struct scatterlist *src, unsigned int nbytes)
394 {
395 struct blkcipher_walk walk;
396 int err;
397
398 blkcipher_walk_init(&walk, dst, src, nbytes);
399 err = blkcipher_walk_virt_block(desc, &walk, TF_BLOCK_SIZE);
400
401 while ((nbytes = walk.nbytes) >= TF_BLOCK_SIZE) {
402 nbytes = __ctr_crypt(desc, &walk);
403 err = blkcipher_walk_done(desc, &walk, nbytes);
404 }
405
406 if (walk.nbytes) {
407 ctr_crypt_final(desc, &walk);
408 err = blkcipher_walk_done(desc, &walk, 0);
409 }
410
411 return err;
412 }
413
/* ctr(twofish): stream-cipher mode, blocksize 1, 3-way parallel keystream. */
static struct crypto_alg blk_ctr_alg = {
	.cra_name		= "ctr(twofish)",
	.cra_driver_name	= "ctr-twofish-3way",
	.cra_priority		= 300,	/* above the generic C implementation */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,	/* CTR handles arbitrary lengths */
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,	/* symmetric mode */
		},
	},
};
436
437 static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
438 {
439 const unsigned int bsize = TF_BLOCK_SIZE;
440 struct twofish_ctx *ctx = priv;
441 int i;
442
443 if (nbytes == 3 * bsize) {
444 twofish_enc_blk_3way(ctx, srcdst, srcdst);
445 return;
446 }
447
448 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
449 twofish_enc_blk(ctx, srcdst, srcdst);
450 }
451
452 static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
453 {
454 const unsigned int bsize = TF_BLOCK_SIZE;
455 struct twofish_ctx *ctx = priv;
456 int i;
457
458 if (nbytes == 3 * bsize) {
459 twofish_dec_blk_3way(ctx, srcdst, srcdst);
460 return;
461 }
462
463 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
464 twofish_dec_blk(ctx, srcdst, srcdst);
465 }
466
/* Per-tfm context for lrw(twofish): LRW tweak table + twofish key schedule. */
struct twofish_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct twofish_ctx twofish_ctx;
};
471
472 static int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
473 unsigned int keylen)
474 {
475 struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
476 int err;
477
478 err = __twofish_setkey(&ctx->twofish_ctx, key, keylen - TF_BLOCK_SIZE,
479 &tfm->crt_flags);
480 if (err)
481 return err;
482
483 return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE);
484 }
485
486 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
487 struct scatterlist *src, unsigned int nbytes)
488 {
489 struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
490 be128 buf[3];
491 struct lrw_crypt_req req = {
492 .tbuf = buf,
493 .tbuflen = sizeof(buf),
494
495 .table_ctx = &ctx->lrw_table,
496 .crypt_ctx = &ctx->twofish_ctx,
497 .crypt_fn = encrypt_callback,
498 };
499
500 return lrw_crypt(desc, dst, src, nbytes, &req);
501 }
502
503 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
504 struct scatterlist *src, unsigned int nbytes)
505 {
506 struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
507 be128 buf[3];
508 struct lrw_crypt_req req = {
509 .tbuf = buf,
510 .tbuflen = sizeof(buf),
511
512 .table_ctx = &ctx->lrw_table,
513 .crypt_ctx = &ctx->twofish_ctx,
514 .crypt_fn = decrypt_callback,
515 };
516
517 return lrw_crypt(desc, dst, src, nbytes, &req);
518 }
519
/* tfm destructor: free the LRW multiplication table allocated at setkey. */
static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
526
/* lrw(twofish): key = cipher key + TF_BLOCK_SIZE bytes of tweak material. */
static struct crypto_alg blk_lrw_alg = {
	.cra_name		= "lrw(twofish)",
	.cra_driver_name	= "lrw-twofish-3way",
	.cra_priority		= 300,	/* above the generic C implementation */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_lrw_alg.cra_list),
	.cra_exit		= lrw_exit_tfm,	/* frees the tweak table */
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE + TF_BLOCK_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE + TF_BLOCK_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= lrw_twofish_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
};
550
/* Per-tfm context for xts(twofish): separate tweak and data key schedules. */
struct twofish_xts_ctx {
	struct twofish_ctx tweak_ctx;
	struct twofish_ctx crypt_ctx;
};
555
556 static int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
557 unsigned int keylen)
558 {
559 struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
560 u32 *flags = &tfm->crt_flags;
561 int err;
562
563 /* key consists of keys of equal size concatenated, therefore
564 * the length must be even
565 */
566 if (keylen % 2) {
567 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
568 return -EINVAL;
569 }
570
571 /* first half of xts-key is for crypt */
572 err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
573 if (err)
574 return err;
575
576 /* second half of xts-key is for tweak */
577 return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
578 flags);
579 }
580
581 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
582 struct scatterlist *src, unsigned int nbytes)
583 {
584 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
585 be128 buf[3];
586 struct xts_crypt_req req = {
587 .tbuf = buf,
588 .tbuflen = sizeof(buf),
589
590 .tweak_ctx = &ctx->tweak_ctx,
591 .tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
592 .crypt_ctx = &ctx->crypt_ctx,
593 .crypt_fn = encrypt_callback,
594 };
595
596 return xts_crypt(desc, dst, src, nbytes, &req);
597 }
598
599 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
600 struct scatterlist *src, unsigned int nbytes)
601 {
602 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
603 be128 buf[3];
604 struct xts_crypt_req req = {
605 .tbuf = buf,
606 .tbuflen = sizeof(buf),
607
608 .tweak_ctx = &ctx->tweak_ctx,
609 .tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
610 .crypt_ctx = &ctx->crypt_ctx,
611 .crypt_fn = decrypt_callback,
612 };
613
614 return xts_crypt(desc, dst, src, nbytes, &req);
615 }
616
/* xts(twofish): key is two concatenated twofish keys (data + tweak). */
static struct crypto_alg blk_xts_alg = {
	.cra_name		= "xts(twofish)",
	.cra_driver_name	= "xts-twofish-3way",
	.cra_priority		= 300,	/* above the generic C implementation */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_xts_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE * 2,
			.max_keysize	= TF_MAX_KEY_SIZE * 2,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= xts_twofish_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
};
639
640 int __init init(void)
641 {
642 int err;
643
644 err = crypto_register_alg(&blk_ecb_alg);
645 if (err)
646 goto ecb_err;
647 err = crypto_register_alg(&blk_cbc_alg);
648 if (err)
649 goto cbc_err;
650 err = crypto_register_alg(&blk_ctr_alg);
651 if (err)
652 goto ctr_err;
653 err = crypto_register_alg(&blk_lrw_alg);
654 if (err)
655 goto blk_lrw_err;
656 err = crypto_register_alg(&blk_xts_alg);
657 if (err)
658 goto blk_xts_err;
659
660 return 0;
661
662 crypto_unregister_alg(&blk_xts_alg);
663 blk_xts_err:
664 crypto_unregister_alg(&blk_lrw_alg);
665 blk_lrw_err:
666 crypto_unregister_alg(&blk_ctr_alg);
667 ctr_err:
668 crypto_unregister_alg(&blk_cbc_alg);
669 cbc_err:
670 crypto_unregister_alg(&blk_ecb_alg);
671 ecb_err:
672 return err;
673 }
674
675 void __exit fini(void)
676 {
677 crypto_unregister_alg(&blk_xts_alg);
678 crypto_unregister_alg(&blk_lrw_alg);
679 crypto_unregister_alg(&blk_ctr_alg);
680 crypto_unregister_alg(&blk_cbc_alg);
681 crypto_unregister_alg(&blk_ecb_alg);
682 }
683
684 module_init(init);
685 module_exit(fini);
686
687 MODULE_LICENSE("GPL");
688 MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
689 MODULE_ALIAS("twofish");
690 MODULE_ALIAS("twofish-asm");
This page took 0.067287 seconds and 5 git commands to generate.