/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>

#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
        struct completion restart;
        struct bio *bio_in;
        struct bio *bio_out;
        unsigned int offset_in;
        unsigned int offset_out;
        unsigned int idx_in;
        unsigned int idx_out;
        sector_t cc_sector;
        atomic_t cc_pending;
};
/*
 * per bio private data
 */
struct dm_crypt_io {
        struct crypt_config *cc;
        struct bio *base_bio;
        struct work_struct work;

        struct convert_context ctx;

        atomic_t io_pending;
        int error;
        sector_t sector;
        struct dm_crypt_io *base_io;
};
struct dm_crypt_request {
        struct convert_context *ctx;
        struct scatterlist sg_in;
        struct scatterlist sg_out;
        sector_t iv_sector;
};
struct crypt_config;

struct crypt_iv_operations {
        int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
                   const char *opts);
        void (*dtr)(struct crypt_config *cc);
        int (*init)(struct crypt_config *cc);
        int (*wipe)(struct crypt_config *cc);
        int (*generator)(struct crypt_config *cc, u8 *iv,
                         struct dm_crypt_request *dmreq);
        int (*post)(struct crypt_config *cc, u8 *iv,
                    struct dm_crypt_request *dmreq);
};
struct iv_essiv_private {
        struct crypto_hash *hash_tfm;
        u8 *salt;
};

struct iv_benbi_private {
        int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
        struct crypto_shash *hash_tfm;
        u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
        struct crypto_shash *crc32_tfm;
        u8 *iv_seed;
        u8 *whitening;
};
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * Duplicated per-CPU state for cipher.
 */
struct crypt_cpu {
        struct ablkcipher_request *req;
};

/*
 * The fields in here must be read only after initialization,
 * changing state should be in crypt_cpu.
 */
struct crypt_config {
        struct dm_dev *dev;
        sector_t start;

        /*
         * pool for per bio private data, crypto requests and
         * encryption requests/buffer pages
         */
        mempool_t *io_pool;
        mempool_t *req_pool;
        mempool_t *page_pool;
        struct bio_set *bs;

        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;

        char *cipher;
        char *cipher_string;

        struct crypt_iv_operations *iv_gen_ops;
        union {
                struct iv_essiv_private essiv;
                struct iv_benbi_private benbi;
                struct iv_lmk_private lmk;
                struct iv_tcw_private tcw;
        } iv_gen_private;
        sector_t iv_offset;
        unsigned int iv_size;

        /*
         * Duplicated per cpu state. Access through
         * per_cpu_ptr() only.
         */
        struct crypt_cpu __percpu *cpu;

        /* ESSIV: struct crypto_cipher *essiv_tfm */
        void *iv_private;
        struct crypto_ablkcipher **tfms;
        unsigned tfms_count;

        /*
         * Layout of each crypto request:
         *
         *   struct ablkcipher_request
         *      context
         *      padding
         *   struct dm_crypt_request
         *      padding
         *   IV
         *
         * The padding is added so that dm_crypt_request and the IV are
         * correctly aligned.
         */
        unsigned int dmreq_start;

        unsigned long flags;
        unsigned int key_size;
        unsigned int key_parts;      /* independent parts in key buffer */
        unsigned int key_extra_size; /* additional keys length */
        u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
{
        return this_cpu_ptr(cc->cpu);
}

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
        return cc->tfms[0];
}
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: http://www.truecrypt.org
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from initial key and the sector number.
 *       In addition, whitening value is applied on every sector, whitening
 *       is calculated from initial key, sector number and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should be used for old compatible containers access only.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
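
/*
 * Worked example (illustrative, not part of the original source): for
 * sector number 0x1122334455 with a 16-byte IV, "plain" keeps only the
 * low 32 bits of the sector while "plain64" keeps all 64 bits; both are
 * little-endian and zero-padded to iv_size:
 *
 *   plain:   55 44 33 22 00 00 00 00  00 00 00 00 00 00 00 00
 *   plain64: 55 44 33 22 11 00 00 00  00 00 00 00 00 00 00 00
 */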
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
                              struct dm_crypt_request *dmreq)
{
        memset(iv, 0, cc->iv_size);
        *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

        return 0;
}
static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
                                struct dm_crypt_request *dmreq)
{
        memset(iv, 0, cc->iv_size);
        *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

        return 0;
}
/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
        struct hash_desc desc;
        struct scatterlist sg;
        struct crypto_cipher *essiv_tfm;
        int err;

        sg_init_one(&sg, cc->key, cc->key_size);
        desc.tfm = essiv->hash_tfm;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
        if (err)
                return err;

        essiv_tfm = cc->iv_private;

        err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
                                   crypto_hash_digestsize(essiv->hash_tfm));
        if (err)
                return err;

        return 0;
}
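
/*
 * In short (sketch of the scheme, not extra driver logic): ESSIV derives
 *
 *   salt = Hash(volume_key)
 *   IV(sector) = E_salt(le64(sector))
 *
 * so crypt_iv_essiv_init() above just recomputes the salt digest and
 * re-keys the private essiv_tfm whenever the volume key changes.
 */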
/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
        unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
        struct crypto_cipher *essiv_tfm;
        int r, err = 0;

        memset(essiv->salt, 0, salt_size);

        essiv_tfm = cc->iv_private;
        r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
        if (r)
                err = r;

        return err;
}
/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
                                             struct dm_target *ti,
                                             u8 *salt, unsigned saltsize)
{
        struct crypto_cipher *essiv_tfm;
        int err;

        /* Setup the essiv_tfm with the given salt */
        essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(essiv_tfm)) {
                ti->error = "Error allocating crypto tfm for ESSIV";
                return essiv_tfm;
        }

        if (crypto_cipher_blocksize(essiv_tfm) !=
            crypto_ablkcipher_ivsize(any_tfm(cc))) {
                ti->error = "Block size of ESSIV cipher does "
                            "not match IV size of block cipher";
                crypto_free_cipher(essiv_tfm);
                return ERR_PTR(-EINVAL);
        }

        err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
        if (err) {
                ti->error = "Failed to set key for ESSIV cipher";
                crypto_free_cipher(essiv_tfm);
                return ERR_PTR(err);
        }

        return essiv_tfm;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
        struct crypto_cipher *essiv_tfm;
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

        crypto_free_hash(essiv->hash_tfm);
        essiv->hash_tfm = NULL;

        kzfree(essiv->salt);
        essiv->salt = NULL;

        essiv_tfm = cc->iv_private;

        if (essiv_tfm)
                crypto_free_cipher(essiv_tfm);

        cc->iv_private = NULL;
}
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        struct crypto_cipher *essiv_tfm = NULL;
        struct crypto_hash *hash_tfm = NULL;
        u8 *salt = NULL;
        int err;

        if (!opts) {
                ti->error = "Digest algorithm missing for ESSIV mode";
                return -EINVAL;
        }

        /* Allocate hash algorithm */
        hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(hash_tfm)) {
                ti->error = "Error initializing ESSIV hash";
                err = PTR_ERR(hash_tfm);
                goto bad;
        }

        salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
        if (!salt) {
                ti->error = "Error kmallocing salt storage in ESSIV";
                err = -ENOMEM;
                goto bad;
        }

        cc->iv_gen_private.essiv.salt = salt;
        cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

        essiv_tfm = setup_essiv_cpu(cc, ti, salt,
                                    crypto_hash_digestsize(hash_tfm));
        if (IS_ERR(essiv_tfm)) {
                crypt_iv_essiv_dtr(cc);
                return PTR_ERR(essiv_tfm);
        }
        cc->iv_private = essiv_tfm;

        return 0;

bad:
        if (hash_tfm && !IS_ERR(hash_tfm))
                crypto_free_hash(hash_tfm);
        kfree(salt);
        return err;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
                              struct dm_crypt_request *dmreq)
{
        struct crypto_cipher *essiv_tfm = cc->iv_private;

        memset(iv, 0, cc->iv_size);
        *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
        crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

        return 0;
}
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
        int log = ilog2(bs);

        /* we need to calculate how far we must shift the sector count
         * to get the cipher block count, we use this shift in _gen */

        if (1 << log != bs) {
                ti->error = "cypher blocksize is not a power of 2";
                return -EINVAL;
        }

        if (log > 9) {
                ti->error = "cypher blocksize is > 512";
                return -EINVAL;
        }

        cc->iv_gen_private.benbi.shift = 9 - log;

        return 0;
}
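
/*
 * Worked example (assuming 16-byte cipher blocks, e.g. AES): bs = 16,
 * log = ilog2(16) = 4, so shift = 9 - 4 = 5.  A 512-byte sector then
 * covers 1 << 5 = 32 cipher blocks, and crypt_iv_benbi_gen() below
 * stores the big-endian block count ((sector << 5) + 1) as the IV.
 */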
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
                              struct dm_crypt_request *dmreq)
{
        __be64 val;

        memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

        val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
        put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

        return 0;
}
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
                             struct dm_crypt_request *dmreq)
{
        memset(iv, 0, cc->iv_size);

        return 0;
}
static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
        struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

        if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
                crypto_free_shash(lmk->hash_tfm);
        lmk->hash_tfm = NULL;

        kzfree(lmk->seed);
        lmk->seed = NULL;
}
static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
                            const char *opts)
{
        struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

        lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
        if (IS_ERR(lmk->hash_tfm)) {
                ti->error = "Error initializing LMK hash";
                return PTR_ERR(lmk->hash_tfm);
        }

        /* No seed in LMK version 2 */
        if (cc->key_parts == cc->tfms_count) {
                lmk->seed = NULL;
                return 0;
        }

        lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
        if (!lmk->seed) {
                crypt_iv_lmk_dtr(cc);
                ti->error = "Error kmallocing seed storage in LMK";
                return -ENOMEM;
        }

        return 0;
}
static int crypt_iv_lmk_init(struct crypt_config *cc)
{
        struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
        int subkey_size = cc->key_size / cc->key_parts;

        /* LMK seed is on the position of LMK_KEYS + 1 key */
        if (lmk->seed)
                memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
                       crypto_shash_digestsize(lmk->hash_tfm));

        return 0;
}
static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
        struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

        if (lmk->seed)
                memset(lmk->seed, 0, LMK_SEED_SIZE);

        return 0;
}
static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
                            struct dm_crypt_request *dmreq,
                            u8 *data)
{
        struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
        struct {
                struct shash_desc desc;
                char ctx[crypto_shash_descsize(lmk->hash_tfm)];
        } sdesc;
        struct md5_state md5state;
        __le32 buf[4];
        int i, r;

        sdesc.desc.tfm = lmk->hash_tfm;
        sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        r = crypto_shash_init(&sdesc.desc);
        if (r)
                return r;

        if (lmk->seed) {
                r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
                if (r)
                        return r;
        }

        /* Sector is always 512B, block size 16, add data of blocks 1-31 */
        r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
        if (r)
                return r;

        /* Sector is cropped to 56 bits here */
        buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
        buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
        buf[2] = cpu_to_le32(4024);
        buf[3] = 0;
        r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
        if (r)
                return r;

        /* No MD5 padding here */
        r = crypto_shash_export(&sdesc.desc, &md5state);
        if (r)
                return r;

        for (i = 0; i < MD5_HASH_WORDS; i++)
                __cpu_to_le32s(&md5state.hash[i]);
        memcpy(iv, &md5state.hash, cc->iv_size);

        return 0;
}
static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
                            struct dm_crypt_request *dmreq)
{
        u8 *src;
        int r = 0;

        if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
                src = kmap_atomic(sg_page(&dmreq->sg_in));
                r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
                kunmap_atomic(src);
        } else
                memset(iv, 0, cc->iv_size);

        return r;
}
static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
                             struct dm_crypt_request *dmreq)
{
        u8 *dst;
        int r;

        if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
                return 0;

        dst = kmap_atomic(sg_page(&dmreq->sg_out));
        r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

        /* Tweak the first block of plaintext sector */
        if (!r)
                crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

        kunmap_atomic(dst);
        return r;
}
static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

        kzfree(tcw->iv_seed);
        tcw->iv_seed = NULL;
        kzfree(tcw->whitening);
        tcw->whitening = NULL;

        if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
                crypto_free_shash(tcw->crc32_tfm);
        tcw->crc32_tfm = NULL;
}
static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
                            const char *opts)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

        if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
                ti->error = "Wrong key size for TCW";
                return -EINVAL;
        }

        tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
        if (IS_ERR(tcw->crc32_tfm)) {
                ti->error = "Error initializing CRC32 in TCW";
                return PTR_ERR(tcw->crc32_tfm);
        }

        tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
        tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
        if (!tcw->iv_seed || !tcw->whitening) {
                crypt_iv_tcw_dtr(cc);
                ti->error = "Error allocating seed storage in TCW";
                return -ENOMEM;
        }

        return 0;
}
static int crypt_iv_tcw_init(struct crypt_config *cc)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
        int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

        memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
        memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
               TCW_WHITENING_SIZE);

        return 0;
}
static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

        memset(tcw->iv_seed, 0, cc->iv_size);
        memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

        return 0;
}
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
                                  struct dm_crypt_request *dmreq,
                                  u8 *data)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
        u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
        u8 buf[TCW_WHITENING_SIZE];
        struct {
                struct shash_desc desc;
                char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
        } sdesc;
        int i, r;

        /* xor whitening with sector number */
        memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
        crypto_xor(buf, (u8 *)&sector, 8);
        crypto_xor(&buf[8], (u8 *)&sector, 8);

        /* calculate crc32 for every 32bit part and xor it */
        sdesc.desc.tfm = tcw->crc32_tfm;
        sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        for (i = 0; i < 4; i++) {
                r = crypto_shash_init(&sdesc.desc);
                if (r)
                        goto out;
                r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
                if (r)
                        goto out;
                r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
                if (r)
                        goto out;
        }
        crypto_xor(&buf[0], &buf[12], 4);
        crypto_xor(&buf[4], &buf[8], 4);

        /* apply whitening (8 bytes) to whole sector */
        for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
                crypto_xor(data + i * 8, buf, 8);
out:
        memset(buf, 0, sizeof(buf));
        return r;
}
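
/*
 * Summary of the mixing above (sketch of the data flow, not extra driver
 * logic): buf starts as the 16-byte whitening seed XORed with the sector
 * number (applied to both 8-byte halves), each of the four 32-bit words
 * is replaced by its CRC32, the words are folded down to one 8-byte
 * pattern, and that pattern is XORed over all 512 / 8 = 64 chunks of
 * the sector.
 */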
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
                            struct dm_crypt_request *dmreq)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
        u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
        u8 *src;
        int r = 0;

        /* Remove whitening from ciphertext */
        if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
                src = kmap_atomic(sg_page(&dmreq->sg_in));
                r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
                kunmap_atomic(src);
        }

        /* Calculate IV */
        memcpy(iv, tcw->iv_seed, cc->iv_size);
        crypto_xor(iv, (u8 *)&sector, 8);
        if (cc->iv_size > 8)
                crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

        return r;
}
static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
                             struct dm_crypt_request *dmreq)
{
        u8 *dst;
        int r;

        if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
                return 0;

        /* Apply whitening on ciphertext */
        dst = kmap_atomic(sg_page(&dmreq->sg_out));
        r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
        kunmap_atomic(dst);

        return r;
}
static struct crypt_iv_operations crypt_iv_plain_ops = {
        .generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
        .generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
        .ctr       = crypt_iv_essiv_ctr,
        .dtr       = crypt_iv_essiv_dtr,
        .init      = crypt_iv_essiv_init,
        .wipe      = crypt_iv_essiv_wipe,
        .generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
        .ctr       = crypt_iv_benbi_ctr,
        .dtr       = crypt_iv_benbi_dtr,
        .generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
        .generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
        .ctr       = crypt_iv_lmk_ctr,
        .dtr       = crypt_iv_lmk_dtr,
        .init      = crypt_iv_lmk_init,
        .wipe      = crypt_iv_lmk_wipe,
        .generator = crypt_iv_lmk_gen,
        .post      = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
        .ctr       = crypt_iv_tcw_ctr,
        .dtr       = crypt_iv_tcw_dtr,
        .init      = crypt_iv_tcw_init,
        .wipe      = crypt_iv_tcw_wipe,
        .generator = crypt_iv_tcw_gen,
        .post      = crypt_iv_tcw_post
};
static void crypt_convert_init(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct bio *bio_out, struct bio *bio_in,
                               sector_t sector)
{
        ctx->bio_in = bio_in;
        ctx->bio_out = bio_out;
        ctx->offset_in = 0;
        ctx->offset_out = 0;
        ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
        ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
        ctx->cc_sector = sector + cc->iv_offset;
        init_completion(&ctx->restart);
}
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
                                             struct ablkcipher_request *req)
{
        return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
                                               struct dm_crypt_request *dmreq)
{
        return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}
static u8 *iv_of_dmreq(struct crypt_config *cc,
                       struct dm_crypt_request *dmreq)
{
        return (u8 *)ALIGN((unsigned long)(dmreq + 1),
                crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}
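
/*
 * Illustrative layout (the numbers are assumptions for the example, not
 * values from a real tfm): with crypto_ablkcipher_reqsize() == 160,
 * crypto_tfm_ctx_alignment() == 8 and an alignmask of 15, each element
 * of cc->req_pool is carved up as
 *
 *   offset 0                 struct ablkcipher_request (+ tfm context)
 *   offset dmreq_start       struct dm_crypt_request
 *   next 16-byte boundary    IV (cc->iv_size bytes)
 *
 * dmreq_of_req() and iv_of_dmreq() above are the accessors for this
 * layout; crypt_ctr() computes dmreq_start to match.
 */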
static int crypt_convert_block(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct ablkcipher_request *req)
{
        struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
        struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
        struct dm_crypt_request *dmreq;
        u8 *iv;
        int r;

        dmreq = dmreq_of_req(cc, req);
        iv = iv_of_dmreq(cc, dmreq);

        dmreq->iv_sector = ctx->cc_sector;
        dmreq->ctx = ctx;
        sg_init_table(&dmreq->sg_in, 1);
        sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
                    bv_in->bv_offset + ctx->offset_in);

        sg_init_table(&dmreq->sg_out, 1);
        sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
                    bv_out->bv_offset + ctx->offset_out);

        ctx->offset_in += 1 << SECTOR_SHIFT;
        if (ctx->offset_in >= bv_in->bv_len) {
                ctx->offset_in = 0;
                ctx->idx_in++;
        }

        ctx->offset_out += 1 << SECTOR_SHIFT;
        if (ctx->offset_out >= bv_out->bv_len) {
                ctx->offset_out = 0;
                ctx->idx_out++;
        }

        if (cc->iv_gen_ops) {
                r = cc->iv_gen_ops->generator(cc, iv, dmreq);
                if (r < 0)
                        return r;
        }

        ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
                                     1 << SECTOR_SHIFT, iv);

        if (bio_data_dir(ctx->bio_in) == WRITE)
                r = crypto_ablkcipher_encrypt(req);
        else
                r = crypto_ablkcipher_decrypt(req);

        if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
                r = cc->iv_gen_ops->post(cc, iv, dmreq);

        return r;
}
static void kcryptd_async_done(struct crypto_async_request *async_req,
                               int error);

static void crypt_alloc_req(struct crypt_config *cc,
                            struct convert_context *ctx)
{
        struct crypt_cpu *this_cc = this_crypt_config(cc);
        unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

        if (!this_cc->req)
                this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

        ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
        ablkcipher_request_set_callback(this_cc->req,
            CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
            kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
{
        struct crypt_cpu *this_cc = this_crypt_config(cc);
        int r;

        atomic_set(&ctx->cc_pending, 1);

        while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
              ctx->idx_out < ctx->bio_out->bi_vcnt) {

                crypt_alloc_req(cc, ctx);

                atomic_inc(&ctx->cc_pending);

                r = crypt_convert_block(cc, ctx, this_cc->req);

                switch (r) {
                /* async */
                case -EBUSY:
                        wait_for_completion(&ctx->restart);
                        reinit_completion(&ctx->restart);
                        /* fall through */
                case -EINPROGRESS:
                        this_cc->req = NULL;
                        ctx->cc_sector++;
                        continue;

                /* sync */
                case 0:
                        atomic_dec(&ctx->cc_pending);
                        ctx->cc_sector++;
                        cond_resched();
                        continue;

                /* error */
                default:
                        atomic_dec(&ctx->cc_pending);
                        return r;
                }
        }

        return 0;
}
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
                                      unsigned *out_of_pages)
{
        struct crypt_config *cc = io->cc;
        struct bio *clone;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
        unsigned i, len;
        struct page *page;

        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
        if (!clone)
                return NULL;

        clone_init(io, clone);
        *out_of_pages = 0;

        for (i = 0; i < nr_iovecs; i++) {
                page = mempool_alloc(cc->page_pool, gfp_mask);
                if (!page) {
                        *out_of_pages = 1;
                        break;
                }

                /*
                 * If additional pages cannot be allocated without waiting,
                 * return a partially-allocated bio.  The caller will then try
                 * to allocate more bios while submitting this partial bio.
                 */
                gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

                len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

                if (!bio_add_page(clone, page, len, 0)) {
                        mempool_free(page, cc->page_pool);
                        break;
                }

                size -= len;
        }

        if (!clone->bi_size) {
                bio_put(clone);
                return NULL;
        }

        return clone;
}
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
        unsigned int i;
        struct bio_vec *bv;

        bio_for_each_segment_all(bv, clone, i) {
                BUG_ON(!bv->bv_page);
                mempool_free(bv->bv_page, cc->page_pool);
                bv->bv_page = NULL;
        }
}
static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
                                          struct bio *bio, sector_t sector)
{
        struct dm_crypt_io *io;

        io = mempool_alloc(cc->io_pool, GFP_NOIO);
        io->cc = cc;
        io->base_bio = bio;
        io->sector = sector;
        io->error = 0;
        io->base_io = NULL;
        atomic_set(&io->io_pending, 0);

        return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
        atomic_inc(&io->io_pending);
}
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;
        struct bio *base_bio = io->base_bio;
        struct dm_crypt_io *base_io = io->base_io;
        int error = io->error;

        if (!atomic_dec_and_test(&io->io_pending))
                return;

        mempool_free(io, cc->io_pool);

        if (likely(!base_io))
                bio_endio(base_bio, error);
        else {
                if (error && !base_io->error)
                        base_io->error = error;
                crypt_dec_pending(base_io);
        }
}
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
        struct dm_crypt_io *io = clone->bi_private;
        struct crypt_config *cc = io->cc;
        unsigned rw = bio_data_dir(clone);

        if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
                error = -EIO;

        /*
         * free the processed pages
         */
        if (rw == WRITE)
                crypt_free_buffer_pages(cc, clone);

        bio_put(clone);

        if (rw == READ && !error) {
                kcryptd_queue_crypt(io);
                return;
        }

        if (unlikely(error))
                io->error = error;

        crypt_dec_pending(io);
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
        struct crypt_config *cc = io->cc;

        clone->bi_private = io;
        clone->bi_end_io  = crypt_endio;
        clone->bi_bdev    = cc->dev->bdev;
        clone->bi_rw      = io->base_bio->bi_rw;
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
        struct crypt_config *cc = io->cc;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;

        /*
         * The block layer might modify the bvec array, so always
         * copy the required bvecs because we need the original
         * one in order to decrypt the whole bio data *afterwards*.
         */
        clone = bio_clone_bioset(base_bio, gfp, cc->bs);
        if (!clone)
                return 1;

        crypt_inc_pending(io);

        clone_init(io, clone);
        clone->bi_sector = cc->start + io->sector;

        generic_make_request(clone);
        return 0;
}
*io
)
1172 struct bio
*clone
= io
->ctx
.bio_out
;
1173 generic_make_request(clone
);
static void kcryptd_io(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ) {
                crypt_inc_pending(io);
                if (kcryptd_io_read(io, GFP_NOIO))
                        io->error = -ENOMEM;
                crypt_dec_pending(io);
        } else
                kcryptd_io_write(io);
}
static void kcryptd_queue_io(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;

        INIT_WORK(&io->work, kcryptd_io);
        queue_work(cc->io_queue, &io->work);
}
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
        struct bio *clone = io->ctx.bio_out;
        struct crypt_config *cc = io->cc;

        if (unlikely(io->error < 0)) {
                crypt_free_buffer_pages(cc, clone);
                bio_put(clone);
                crypt_dec_pending(io);
                return;
        }

        /* crypt_convert should have filled the clone bio */
        BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

        clone->bi_sector = cc->start + io->sector;

        if (async)
                kcryptd_queue_io(io);
        else
                generic_make_request(clone);
}
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;
        struct bio *clone;
        struct dm_crypt_io *new_io;
        int crypt_finished;
        unsigned out_of_pages = 0;
        unsigned remaining = io->base_bio->bi_size;
        sector_t sector = io->sector;
        int r;

        /*
         * Prevent io from disappearing until this function completes.
         */
        crypt_inc_pending(io);
        crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

        /*
         * The allocated buffers can be smaller than the whole bio,
         * so repeat the whole process until all the data can be handled.
         */
        while (remaining) {
                clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
                if (unlikely(!clone)) {
                        io->error = -ENOMEM;
                        break;
                }

                io->ctx.bio_out = clone;
                io->ctx.idx_out = 0;

                remaining -= clone->bi_size;
                sector += bio_sectors(clone);

                crypt_inc_pending(io);

                r = crypt_convert(cc, &io->ctx);
                if (r < 0)
                        io->error = -EIO;

                crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

                /* Encryption was already finished, submit io now */
                if (crypt_finished) {
                        kcryptd_crypt_write_io_submit(io, 0);

                        /*
                         * If there was an error, do not try next fragments.
                         * For async, error is processed in async handler.
                         */
                        if (unlikely(r < 0))
                                break;

                        io->sector = sector;
                }

                /*
                 * Out of memory -> run queues
                 * But don't wait if split was due to the io size restriction
                 */
                if (unlikely(out_of_pages))
                        congestion_wait(BLK_RW_ASYNC, HZ/100);

                /*
                 * With async crypto it is unsafe to share the crypto context
                 * between fragments, so switch to a new dm_crypt_io structure.
                 */
                if (unlikely(!crypt_finished && remaining)) {
                        new_io = crypt_io_alloc(io->cc, io->base_bio,
                                                sector);
                        crypt_inc_pending(new_io);
                        crypt_convert_init(cc, &new_io->ctx, NULL,
                                           io->base_bio, sector);
                        new_io->ctx.idx_in = io->ctx.idx_in;
                        new_io->ctx.offset_in = io->ctx.offset_in;

                        /*
                         * Fragments after the first use the base_io
                         * pending count.
                         */
                        if (!io->base_io)
                                new_io->base_io = io;
                        else {
                                new_io->base_io = io->base_io;
                                crypt_inc_pending(io->base_io);
                                crypt_dec_pending(io);
                        }

                        io = new_io;
                }
        }

        crypt_dec_pending(io);
}
static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
        crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;
        int r = 0;

        crypt_inc_pending(io);

        crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
                           io->sector);

        r = crypt_convert(cc, &io->ctx);
        if (r < 0)
                io->error = -EIO;

        if (atomic_dec_and_test(&io->ctx.cc_pending))
                kcryptd_crypt_read_done(io);

        crypt_dec_pending(io);
}
static void kcryptd_async_done(struct crypto_async_request *async_req,
                               int error)
{
        struct dm_crypt_request *dmreq = async_req->data;
        struct convert_context *ctx = dmreq->ctx;
        struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
        struct crypt_config *cc = io->cc;

        if (error == -EINPROGRESS) {
                complete(&ctx->restart);
                return;
        }

        if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
                error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

        if (error < 0)
                io->error = -EIO;

        mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

        if (!atomic_dec_and_test(&ctx->cc_pending))
                return;

        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_crypt_read_done(io);
        else
                kcryptd_crypt_write_io_submit(io, 1);
}
static void kcryptd_crypt(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_crypt_read_convert(io);
        else
                kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;

        INIT_WORK(&io->work, kcryptd_crypt);
        queue_work(cc->crypt_queue, &io->work);
}
/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
        char buffer[3];
        unsigned int i;

        buffer[2] = '\0';

        for (i = 0; i < size; i++) {
                buffer[0] = *hex++;
                buffer[1] = *hex++;

                if (kstrtou8(buffer, 16, &key[i]))
                        return -EINVAL;
        }

        if (*hex != '\0')
                return -EINVAL;

        return 0;
}
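
/*
 * Example (hypothetical key string): "12ab" decodes into
 * key[0] = 0x12, key[1] = 0xab, i.e. a key_size of strlen("12ab") / 2
 * = 2 bytes, which is exactly how crypt_ctr() sizes cc->key.
 */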
static void crypt_free_tfms(struct crypt_config *cc)
{
        unsigned i;

        if (!cc->tfms)
                return;

        for (i = 0; i < cc->tfms_count; i++)
                if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
                        crypto_free_ablkcipher(cc->tfms[i]);
                        cc->tfms[i] = NULL;
                }

        kfree(cc->tfms);
        cc->tfms = NULL;
}
static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
        unsigned i;
        int err;

        cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
                           GFP_KERNEL);
        if (!cc->tfms)
                return -ENOMEM;

        for (i = 0; i < cc->tfms_count; i++) {
                cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
                if (IS_ERR(cc->tfms[i])) {
                        err = PTR_ERR(cc->tfms[i]);
                        crypt_free_tfms(cc);
                        return err;
                }
        }

        return 0;
}
*cc
)
1453 unsigned subkey_size
;
1456 /* Ignore extra keys (which are used for IV etc) */
1457 subkey_size
= (cc
->key_size
- cc
->key_extra_size
) >> ilog2(cc
->tfms_count
);
1459 for (i
= 0; i
< cc
->tfms_count
; i
++) {
1460 r
= crypto_ablkcipher_setkey(cc
->tfms
[i
],
1461 cc
->key
+ (i
* subkey_size
),
static int crypt_set_key(struct crypt_config *cc, char *key)
{
        int r = -EINVAL;
        int key_string_len = strlen(key);

        /* The key size may not be changed. */
        if (cc->key_size != (key_string_len >> 1))
                goto out;

        /* Hyphen (which gives a key_size of zero) means there is no key. */
        if (!cc->key_size && strcmp(key, "-"))
                goto out;

        if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
                goto out;

        set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

        r = crypt_setkey_allcpus(cc);

out:
        /* Hex key string not needed after here, so wipe it. */
        memset(key, '0', key_string_len);

        return r;
}
static int crypt_wipe_key(struct crypt_config *cc)
{
        clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
        memset(&cc->key, 0, cc->key_size * sizeof(u8));

        return crypt_setkey_allcpus(cc);
}
static void crypt_dtr(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;
        struct crypt_cpu *cpu_cc;
        int cpu;

        ti->private = NULL;

        if (!cc)
                return;

        if (cc->io_queue)
                destroy_workqueue(cc->io_queue);
        if (cc->crypt_queue)
                destroy_workqueue(cc->crypt_queue);

        if (cc->cpu)
                for_each_possible_cpu(cpu) {
                        cpu_cc = per_cpu_ptr(cc->cpu, cpu);
                        if (cpu_cc->req)
                                mempool_free(cpu_cc->req, cc->req_pool);
                }

        crypt_free_tfms(cc);

        if (cc->bs)
                bioset_free(cc->bs);

        if (cc->page_pool)
                mempool_destroy(cc->page_pool);
        if (cc->req_pool)
                mempool_destroy(cc->req_pool);
        if (cc->io_pool)
                mempool_destroy(cc->io_pool);

        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);

        if (cc->dev)
                dm_put_device(ti, cc->dev);

        if (cc->cpu)
                free_percpu(cc->cpu);

        kzfree(cc->cipher);
        kzfree(cc->cipher_string);

        /* Must zero key material before freeing */
        kzfree(cc);
}
static int crypt_ctr_cipher(struct dm_target *ti,
                            char *cipher_in, char *key)
{
        struct crypt_config *cc = ti->private;
        char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
        char *cipher_api = NULL;
        int ret = -EINVAL;
        char dummy;

        /* Convert to crypto api definition? */
        if (strchr(cipher_in, '(')) {
                ti->error = "Bad cipher specification";
                return -EINVAL;
        }

        cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
        if (!cc->cipher_string)
                goto bad_mem;

        /*
         * Legacy dm-crypt cipher specification
         * cipher[:keycount]-mode-iv:ivopts
         */
        tmp = cipher_in;
        keycount = strsep(&tmp, "-");
        cipher = strsep(&keycount, ":");

        if (!keycount)
                cc->tfms_count = 1;
        else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
                 !is_power_of_2(cc->tfms_count)) {
                ti->error = "Bad cipher key count specification";
                return -EINVAL;
        }
        cc->key_parts = cc->tfms_count;
        cc->key_extra_size = 0;

        cc->cipher = kstrdup(cipher, GFP_KERNEL);
        if (!cc->cipher)
                goto bad_mem;

        chainmode = strsep(&tmp, "-");
        ivopts = strsep(&tmp, "-");
        ivmode = strsep(&ivopts, ":");

        if (tmp)
                DMWARN("Ignoring unexpected additional cipher options");
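
        /*
         * Parsing example (hypothetical table argument): for cipher_in
         * "aes:64-cbc-essiv:sha256" the strsep() calls above yield
         * cipher = "aes", tfms_count = 64, chainmode = "cbc",
         * ivmode = "essiv" and ivopts = "sha256".
         */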
        cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
                                 __alignof__(struct crypt_cpu));
        if (!cc->cpu) {
                ti->error = "Cannot allocate per cpu state";
                goto bad_mem;
        }

        /*
         * For compatibility with the original dm-crypt mapping format, if
         * only the cipher name is supplied, use cbc-plain.
         */
        if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
                chainmode = "cbc";
                ivmode = "plain";
        }

        if (strcmp(chainmode, "ecb") && !ivmode) {
                ti->error = "IV mechanism required";
                return -EINVAL;
        }

        cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
        if (!cipher_api)
                goto bad_mem;

        ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
                       "%s(%s)", chainmode, cipher);
        if (ret < 0) {
                kfree(cipher_api);
                goto bad_mem;
        }
        /* Allocate cipher */
        ret = crypt_alloc_tfms(cc, cipher_api);
        if (ret < 0) {
                ti->error = "Error allocating crypto tfm";
                goto bad;
        }

        /* Initialize IV */
        cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
        if (cc->iv_size)
                /* at least a 64 bit sector number should fit in our buffer */
                cc->iv_size = max(cc->iv_size,
                                  (unsigned int)(sizeof(u64) / sizeof(u8)));
        else if (ivmode) {
                DMWARN("Selected cipher does not support IVs");
                ivmode = NULL;
        }
        /* Choose ivmode, see comments at iv code. */
        if (ivmode == NULL)
                cc->iv_gen_ops = NULL;
        else if (strcmp(ivmode, "plain") == 0)
                cc->iv_gen_ops = &crypt_iv_plain_ops;
        else if (strcmp(ivmode, "plain64") == 0)
                cc->iv_gen_ops = &crypt_iv_plain64_ops;
        else if (strcmp(ivmode, "essiv") == 0)
                cc->iv_gen_ops = &crypt_iv_essiv_ops;
        else if (strcmp(ivmode, "benbi") == 0)
                cc->iv_gen_ops = &crypt_iv_benbi_ops;
        else if (strcmp(ivmode, "null") == 0)
                cc->iv_gen_ops = &crypt_iv_null_ops;
        else if (strcmp(ivmode, "lmk") == 0) {
                cc->iv_gen_ops = &crypt_iv_lmk_ops;
                /*
                 * Version 2 and 3 is recognised according
                 * to length of provided multi-key string.
                 * If present (version 3), last key is used as IV seed.
                 * All keys (including IV seed) are always the same size.
                 */
                if (cc->key_size % cc->key_parts) {
                        cc->key_parts++;
                        cc->key_extra_size = cc->key_size / cc->key_parts;
                }
        } else if (strcmp(ivmode, "tcw") == 0) {
                cc->iv_gen_ops = &crypt_iv_tcw_ops;
                cc->key_parts += 2; /* IV + whitening */
                cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
        } else {
                ret = -EINVAL;
                ti->error = "Invalid IV mode";
                goto bad;
        }
        /* Initialize and set key */
        ret = crypt_set_key(cc, key);
        if (ret < 0) {
                ti->error = "Error decoding and setting key";
                goto bad;
        }

        /* Allocate IV */
        if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
                ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
                if (ret < 0) {
                        ti->error = "Error creating IV";
                        goto bad;
                }
        }

        /* Initialize IV (set keys for ESSIV etc) */
        if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
                ret = cc->iv_gen_ops->init(cc);
                if (ret < 0) {
                        ti->error = "Error initialising IV";
                        goto bad;
                }
        }

        ret = 0;
bad:
        kfree(cipher_api);
        return ret;

bad_mem:
        ti->error = "Cannot allocate cipher strings";
        return -ENOMEM;
}
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
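
/*
 * Example table line (hypothetical device and key): a 512 MiB mapping
 * using 256-bit AES in CBC mode with ESSIV might look like
 *
 *   0 1048576 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb 0
 */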
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct crypt_config *cc;
        unsigned int key_size, opt_params;
        unsigned long long tmpll;
        int ret;
        struct dm_arg_set as;
        const char *opt_string;
        char dummy;

        static struct dm_arg _args[] = {
                {0, 1, "Invalid number of feature args"},
        };

        if (argc < 5) {
                ti->error = "Not enough arguments";
                return -EINVAL;
        }

        key_size = strlen(argv[1]) >> 1;

        cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
        if (!cc) {
                ti->error = "Cannot allocate encryption context";
                return -ENOMEM;
        }
        cc->key_size = key_size;

        ti->private = cc;
        ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
        if (ret < 0)
                goto bad;

        ret = -ENOMEM;
        cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
        if (!cc->io_pool) {
                ti->error = "Cannot allocate crypt io mempool";
                goto bad;
        }

        cc->dmreq_start = sizeof(struct ablkcipher_request);
        cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
        cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
        cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
                           ~(crypto_tfm_ctx_alignment() - 1);

        cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
                        sizeof(struct dm_crypt_request) + cc->iv_size);
        if (!cc->req_pool) {
                ti->error = "Cannot allocate crypt request mempool";
                goto bad;
        }

        cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
        if (!cc->page_pool) {
                ti->error = "Cannot allocate page mempool";
                goto bad;
        }

        cc->bs = bioset_create(MIN_IOS, 0);
        if (!cc->bs) {
                ti->error = "Cannot allocate crypt bioset";
                goto bad;
        }

        ret = -EINVAL;
        if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
                ti->error = "Invalid iv_offset sector";
                goto bad;
        }
        cc->iv_offset = tmpll;

        if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
                ti->error = "Device lookup failed";
                goto bad;
        }

        if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
                ti->error = "Invalid device sector";
                goto bad;
        }
        cc->start = tmpll;

        argv += 5;
        argc -= 5;

        /* Optional parameters */
        if (argc) {
                as.argc = argc;
                as.argv = argv;

                ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
                if (ret)
                        goto bad;

                opt_string = dm_shift_arg(&as);

                if (opt_params == 1 && opt_string &&
                    !strcasecmp(opt_string, "allow_discards"))
                        ti->num_discard_bios = 1;
                else if (opt_params) {
                        ret = -EINVAL;
                        ti->error = "Invalid feature arguments";
                        goto bad;
                }
        }

        ret = -ENOMEM;
        cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
        if (!cc->io_queue) {
                ti->error = "Couldn't create kcryptd io queue";
                goto bad;
        }

        cc->crypt_queue = alloc_workqueue("kcryptd",
                                          WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
                goto bad;
        }

        ti->num_flush_bios = 1;
        ti->discard_zeroes_data_unsupported = true;

        return 0;

bad:
        crypt_dtr(ti);
        return ret;
}
static int crypt_map(struct dm_target *ti, struct bio *bio)
{
        struct dm_crypt_io *io;
        struct crypt_config *cc = ti->private;

        /*
         * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
         * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
         * - for REQ_DISCARD caller must use flush if IO ordering matters
         */
        if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
                bio->bi_bdev = cc->dev->bdev;
                if (bio_sectors(bio))
                        bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
                return DM_MAPIO_REMAPPED;
        }

        io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));

        if (bio_data_dir(io->base_bio) == READ) {
                if (kcryptd_io_read(io, GFP_NOWAIT))
                        kcryptd_queue_io(io);
        } else
                kcryptd_queue_crypt(io);

        return DM_MAPIO_SUBMITTED;
}
static void crypt_status(struct dm_target *ti, status_type_t type,
                         unsigned status_flags, char *result, unsigned maxlen)
{
        struct crypt_config *cc = ti->private;
        unsigned i, sz = 0;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                DMEMIT("%s ", cc->cipher_string);

                if (cc->key_size > 0)
                        for (i = 0; i < cc->key_size; i++)
                                DMEMIT("%02x", cc->key[i]);
                else
                        DMEMIT("-");

                DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
                                cc->dev->name, (unsigned long long)cc->start);

                if (ti->num_discard_bios)
                        DMEMIT(" 1 allow_discards");

                break;
        }
}
static void crypt_postsuspend(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
                DMERR("aborting resume - crypt key is not set.");
                return -EAGAIN;
        }

        return 0;
}

static void crypt_resume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
/* Message interface
 *      key set <key>
 *      key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
        struct crypt_config *cc = ti->private;
        int ret = -EINVAL;

        if (argc < 2)
                goto error;

        if (!strcasecmp(argv[0], "key")) {
                if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
                        DMWARN("not suspended during key manipulation.");
                        return -EINVAL;
                }
                if (argc == 3 && !strcasecmp(argv[1], "set")) {
                        ret = crypt_set_key(cc, argv[2]);
                        if (ret)
                                return ret;
                        if (cc->iv_gen_ops && cc->iv_gen_ops->init)
                                ret = cc->iv_gen_ops->init(cc);
                        return ret;
                }
                if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
                        if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
                                ret = cc->iv_gen_ops->wipe(cc);
                                if (ret)
                                        return ret;
                        }
                        return crypt_wipe_key(cc);
                }
        }

error:
        DMWARN("unrecognised message received.");
        return -EINVAL;
}
static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
                       struct bio_vec *biovec, int max_size)
{
        struct crypt_config *cc = ti->private;
        struct request_queue *q = bdev_get_queue(cc->dev->bdev);

        if (!q->merge_bvec_fn)
                return max_size;

        bvm->bi_bdev = cc->dev->bdev;
        bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
static int crypt_iterate_devices(struct dm_target *ti,
                                 iterate_devices_callout_fn fn, void *data)
{
        struct crypt_config *cc = ti->private;

        return fn(ti, cc->dev, cc->start, ti->len, data);
}
static struct target_type crypt_target = {
        .name   = "crypt",
        .version = {1, 13, 0},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,
        .map    = crypt_map,
        .status = crypt_status,
        .postsuspend = crypt_postsuspend,
        .preresume = crypt_preresume,
        .resume = crypt_resume,
        .message = crypt_message,
        .merge  = crypt_merge,
        .iterate_devices = crypt_iterate_devices,
};
static int __init dm_crypt_init(void)
{
        int r;

        _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
        if (!_crypt_io_pool)
                return -ENOMEM;

        r = dm_register_target(&crypt_target);
        if (r < 0) {
                DMERR("register failed %d", r);
                kmem_cache_destroy(_crypt_io_pool);
        }

        return r;
}
static void __exit dm_crypt_exit(void)
{
        dm_unregister_target(&crypt_target);
        kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");