Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (C) 2003 Christophe Saout <christophe@saout.de> | |
3 | * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> | |
e48d4bbf | 4 | * Copyright (C) 2006 Red Hat, Inc. All rights reserved. |
1da177e4 LT |
5 | * |
6 | * This file is released under the GPL. | |
7 | */ | |
8 | ||
d1806f6a | 9 | #include <linux/err.h> |
1da177e4 LT |
10 | #include <linux/module.h> |
11 | #include <linux/init.h> | |
12 | #include <linux/kernel.h> | |
13 | #include <linux/bio.h> | |
14 | #include <linux/blkdev.h> | |
15 | #include <linux/mempool.h> | |
16 | #include <linux/slab.h> | |
17 | #include <linux/crypto.h> | |
18 | #include <linux/workqueue.h> | |
19 | #include <asm/atomic.h> | |
378f058c | 20 | #include <linux/scatterlist.h> |
1da177e4 LT |
21 | #include <asm/page.h> |
22 | ||
23 | #include "dm.h" | |
24 | ||
72d94861 | 25 | #define DM_MSG_PREFIX "crypt" |
e48d4bbf | 26 | #define MESG_STR(x) x, sizeof(x) |
1da177e4 LT |
27 | |
28 | /* | |
29 | * per bio private data | |
30 | */ | |
31 | struct crypt_io { | |
32 | struct dm_target *target; | |
8b004457 | 33 | struct bio *base_bio; |
1da177e4 LT |
34 | struct bio *first_clone; |
35 | struct work_struct work; | |
36 | atomic_t pending; | |
37 | int error; | |
23541d2d | 38 | int post_process; |
1da177e4 LT |
39 | }; |
40 | ||
41 | /* | |
42 | * context holding the current state of a multi-part conversion | |
43 | */ | |
44 | struct convert_context { | |
45 | struct bio *bio_in; | |
46 | struct bio *bio_out; | |
47 | unsigned int offset_in; | |
48 | unsigned int offset_out; | |
49 | unsigned int idx_in; | |
50 | unsigned int idx_out; | |
51 | sector_t sector; | |
52 | int write; | |
53 | }; | |
54 | ||
55 | struct crypt_config; | |
56 | ||
57 | struct crypt_iv_operations { | |
58 | int (*ctr)(struct crypt_config *cc, struct dm_target *ti, | |
59 | const char *opts); | |
60 | void (*dtr)(struct crypt_config *cc); | |
61 | const char *(*status)(struct crypt_config *cc); | |
62 | int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); | |
63 | }; | |
64 | ||
65 | /* | |
66 | * Crypt: maps a linear range of a block device | |
67 | * and encrypts / decrypts at the same time. | |
68 | */ | |
e48d4bbf | 69 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; |
1da177e4 LT |
70 | struct crypt_config { |
71 | struct dm_dev *dev; | |
72 | sector_t start; | |
73 | ||
74 | /* | |
75 | * pool for per bio private data and | |
76 | * for encryption buffer pages | |
77 | */ | |
78 | mempool_t *io_pool; | |
79 | mempool_t *page_pool; | |
6a24c718 | 80 | struct bio_set *bs; |
1da177e4 LT |
81 | |
82 | /* | |
83 | * crypto related data | |
84 | */ | |
85 | struct crypt_iv_operations *iv_gen_ops; | |
86 | char *iv_mode; | |
d1806f6a | 87 | struct crypto_cipher *iv_gen_private; |
1da177e4 LT |
88 | sector_t iv_offset; |
89 | unsigned int iv_size; | |
90 | ||
d1806f6a HX |
91 | char cipher[CRYPTO_MAX_ALG_NAME]; |
92 | char chainmode[CRYPTO_MAX_ALG_NAME]; | |
93 | struct crypto_blkcipher *tfm; | |
e48d4bbf | 94 | unsigned long flags; |
1da177e4 LT |
95 | unsigned int key_size; |
96 | u8 key[0]; | |
97 | }; | |
98 | ||
6a24c718 | 99 | #define MIN_IOS 16 |
1da177e4 LT |
100 | #define MIN_POOL_PAGES 32 |
101 | #define MIN_BIO_PAGES 8 | |
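/*
 * Sizing note (inferred from how the pools are used below, not part of the
 * original source): MIN_IOS bounds the reserved crypt_io structs and bioset
 * entries, MIN_POOL_PAGES the reserved encryption buffer pages, and
 * MIN_BIO_PAGES is how many pages crypt_alloc_buffer() allocates with
 * blocking GFP flags before switching to non-blocking allocation.
 */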
102 | ||
103 | static kmem_cache_t *_crypt_io_pool; | |
104 | ||
1da177e4 LT |
105 | /* |
106 | * Different IV generation algorithms: | |
107 | * | |
3c164bd8 | 108 | * plain: the initial vector is the 32-bit little-endian version of the sector |
1da177e4 LT |
109 | * number, padded with zeros if necessary.
110 | * | |
3c164bd8 RS |
111 | * essiv: "encrypted sector|salt initial vector", the sector number is |
112 | * encrypted with the bulk cipher using a salt as key. The salt | |
113 | * should be derived from the bulk cipher's key via hashing. | |
1da177e4 LT |
114 | * |
115 | * plumb: unimplemented, see: | |
116 | * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 | |
117 | */ | |
118 | ||
119 | static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | |
120 | { | |
121 | memset(iv, 0, cc->iv_size); | |
122 | *(u32 *)iv = cpu_to_le32(sector & 0xffffffff); | |
123 | ||
124 | return 0; | |
125 | } | |
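/*
 * Worked example (illustrative, not part of the original source): with a
 * 16-byte IV and sector number 0x12345678, the buffer becomes
 *   78 56 34 12 00 00 00 00 00 00 00 00 00 00 00 00
 * i.e. the low 32 bits of the sector in little-endian byte order, zero-padded.
 */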
126 | ||
127 | static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, | |
128 | const char *opts) | |
129 | { | |
d1806f6a | 130 | struct crypto_cipher *essiv_tfm; |
35058687 HX |
131 | struct crypto_hash *hash_tfm; |
132 | struct hash_desc desc; | |
1da177e4 LT |
133 | struct scatterlist sg; |
134 | unsigned int saltsize; | |
135 | u8 *salt; | |
d1806f6a | 136 | int err; |
1da177e4 LT |
137 | |
138 | if (opts == NULL) { | |
72d94861 | 139 | ti->error = "Digest algorithm missing for ESSIV mode"; |
1da177e4 LT |
140 | return -EINVAL; |
141 | } | |
142 | ||
143 | /* Hash the cipher key with the given hash algorithm */ | |
35058687 HX |
144 | hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); |
145 | if (IS_ERR(hash_tfm)) { | |
72d94861 | 146 | ti->error = "Error initializing ESSIV hash"; |
35058687 | 147 | return PTR_ERR(hash_tfm); |
1da177e4 LT |
148 | } |
149 | ||
35058687 | 150 | saltsize = crypto_hash_digestsize(hash_tfm); |
1da177e4 LT |
151 | salt = kmalloc(saltsize, GFP_KERNEL); |
152 | if (salt == NULL) { | |
72d94861 | 153 | ti->error = "Error kmallocing salt storage in ESSIV"; |
35058687 | 154 | crypto_free_hash(hash_tfm); |
1da177e4 LT |
155 | return -ENOMEM; |
156 | } | |
157 | ||
378f058c | 158 | sg_set_buf(&sg, cc->key, cc->key_size); |
35058687 HX |
159 | desc.tfm = hash_tfm; |
160 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | |
161 | err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); | |
162 | crypto_free_hash(hash_tfm); | |
163 | ||
164 | if (err) { | |
165 | ti->error = "Error calculating hash in ESSIV"; | |
166 | return err; | |
167 | } | |
1da177e4 LT |
168 | |
169 | /* Setup the essiv_tfm with the given salt */ | |
d1806f6a HX |
170 | essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); |
171 | if (IS_ERR(essiv_tfm)) { | |
72d94861 | 172 | ti->error = "Error allocating crypto tfm for ESSIV"; |
1da177e4 | 173 | kfree(salt); |
d1806f6a | 174 | return PTR_ERR(essiv_tfm); |
1da177e4 | 175 | } |
d1806f6a HX |
176 | if (crypto_cipher_blocksize(essiv_tfm) != |
177 | crypto_blkcipher_ivsize(cc->tfm)) { | |
72d94861 | 178 | ti->error = "Block size of ESSIV cipher does " |
1da177e4 | 179 | "not match IV size of block cipher"; |
d1806f6a | 180 | crypto_free_cipher(essiv_tfm); |
1da177e4 LT |
181 | kfree(salt); |
182 | return -EINVAL; | |
183 | } | |
d1806f6a HX |
184 | err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); |
185 | if (err) { | |
72d94861 | 186 | ti->error = "Failed to set key for ESSIV cipher"; |
d1806f6a | 187 | crypto_free_cipher(essiv_tfm); |
1da177e4 | 188 | kfree(salt); |
d1806f6a | 189 | return err; |
1da177e4 LT |
190 | } |
191 | kfree(salt); | |
192 | ||
d1806f6a | 193 | cc->iv_gen_private = essiv_tfm; |
1da177e4 LT |
194 | return 0; |
195 | } | |
196 | ||
197 | static void crypt_iv_essiv_dtr(struct crypt_config *cc) | |
198 | { | |
d1806f6a | 199 | crypto_free_cipher(cc->iv_gen_private); |
1da177e4 LT |
200 | cc->iv_gen_private = NULL; |
201 | } | |
202 | ||
203 | static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | |
204 | { | |
1da177e4 LT |
205 | memset(iv, 0, cc->iv_size); |
206 | *(u64 *)iv = cpu_to_le64(sector); | |
d1806f6a | 207 | crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv); |
1da177e4 LT |
208 | return 0; |
209 | } | |
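/*
 * Summary (illustrative, not part of the original source): ESSIV encrypts
 * the little-endian 64-bit sector number with a key derived as
 * hash(bulk key) in crypt_iv_essiv_ctr(), i.e. IV = E_salt(sector).
 * Unlike the plain generator, the resulting IVs are not predictable
 * without knowledge of the key.
 */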
210 | ||
211 | static struct crypt_iv_operations crypt_iv_plain_ops = { | |
212 | .generator = crypt_iv_plain_gen | |
213 | }; | |
214 | ||
215 | static struct crypt_iv_operations crypt_iv_essiv_ops = { | |
216 | .ctr = crypt_iv_essiv_ctr, | |
217 | .dtr = crypt_iv_essiv_dtr, | |
218 | .generator = crypt_iv_essiv_gen | |
219 | }; | |
220 | ||
221 | ||
858119e1 | 222 | static int |
1da177e4 LT |
223 | crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, |
224 | struct scatterlist *in, unsigned int length, | |
225 | int write, sector_t sector) | |
226 | { | |
227 | u8 iv[cc->iv_size]; | |
d1806f6a HX |
228 | struct blkcipher_desc desc = { |
229 | .tfm = cc->tfm, | |
230 | .info = iv, | |
231 | .flags = CRYPTO_TFM_REQ_MAY_SLEEP, | |
232 | }; | |
1da177e4 LT |
233 | int r; |
234 | ||
235 | if (cc->iv_gen_ops) { | |
236 | r = cc->iv_gen_ops->generator(cc, iv, sector); | |
237 | if (r < 0) | |
238 | return r; | |
239 | ||
240 | if (write) | |
d1806f6a | 241 | r = crypto_blkcipher_encrypt_iv(&desc, out, in, length); |
1da177e4 | 242 | else |
d1806f6a | 243 | r = crypto_blkcipher_decrypt_iv(&desc, out, in, length); |
1da177e4 LT |
244 | } else { |
245 | if (write) | |
d1806f6a | 246 | r = crypto_blkcipher_encrypt(&desc, out, in, length); |
1da177e4 | 247 | else |
d1806f6a | 248 | r = crypto_blkcipher_decrypt(&desc, out, in, length); |
1da177e4 LT |
249 | } |
250 | ||
251 | return r; | |
252 | } | |
253 | ||
254 | static void | |
255 | crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx, | |
256 | struct bio *bio_out, struct bio *bio_in, | |
257 | sector_t sector, int write) | |
258 | { | |
259 | ctx->bio_in = bio_in; | |
260 | ctx->bio_out = bio_out; | |
261 | ctx->offset_in = 0; | |
262 | ctx->offset_out = 0; | |
263 | ctx->idx_in = bio_in ? bio_in->bi_idx : 0; | |
264 | ctx->idx_out = bio_out ? bio_out->bi_idx : 0; | |
265 | ctx->sector = sector + cc->iv_offset; | |
266 | ctx->write = write; | |
267 | } | |
268 | ||
269 | /* | |
270 | * Encrypt / decrypt data from one bio to another one (can be the same one) | |
271 | */ | |
272 | static int crypt_convert(struct crypt_config *cc, | |
273 | struct convert_context *ctx) | |
274 | { | |
275 | int r = 0; | |
276 | ||
277 | while(ctx->idx_in < ctx->bio_in->bi_vcnt && | |
278 | ctx->idx_out < ctx->bio_out->bi_vcnt) { | |
279 | struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); | |
280 | struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); | |
281 | struct scatterlist sg_in = { | |
282 | .page = bv_in->bv_page, | |
283 | .offset = bv_in->bv_offset + ctx->offset_in, | |
284 | .length = 1 << SECTOR_SHIFT | |
285 | }; | |
286 | struct scatterlist sg_out = { | |
287 | .page = bv_out->bv_page, | |
288 | .offset = bv_out->bv_offset + ctx->offset_out, | |
289 | .length = 1 << SECTOR_SHIFT | |
290 | }; | |
291 | ||
292 | ctx->offset_in += sg_in.length; | |
293 | if (ctx->offset_in >= bv_in->bv_len) { | |
294 | ctx->offset_in = 0; | |
295 | ctx->idx_in++; | |
296 | } | |
297 | ||
298 | ctx->offset_out += sg_out.length; | |
299 | if (ctx->offset_out >= bv_out->bv_len) { | |
300 | ctx->offset_out = 0; | |
301 | ctx->idx_out++; | |
302 | } | |
303 | ||
304 | r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length, | |
305 | ctx->write, ctx->sector); | |
306 | if (r < 0) | |
307 | break; | |
308 | ||
309 | ctx->sector++; | |
310 | } | |
311 | ||
312 | return r; | |
313 | } | |
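/*
 * Illustrative note (not part of the original source): the loop advances
 * both bios one 512-byte sector at a time, so a single 4 KiB page results
 * in eight crypt_convert_scatterlist() calls, each with an IV freshly
 * generated from the (iv_offset-adjusted) sector number.
 */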
314 | ||
6a24c718 MB |
315 | static void dm_crypt_bio_destructor(struct bio *bio) |
316 | { | |
317 | struct crypt_io *io = bio->bi_private; | |
318 | struct crypt_config *cc = io->target->private; | |
319 | ||
320 | bio_free(bio, cc->bs); | |
321 | } | |
322 | ||
1da177e4 LT |
323 | /* |
324 | * Generate a new unfragmented bio with the given size | |
325 | * This should never violate the device limitations | |
326 | * May return a smaller bio when running out of pages | |
327 | */ | |
328 | static struct bio * | |
329 | crypt_alloc_buffer(struct crypt_config *cc, unsigned int size, | |
330 | struct bio *base_bio, unsigned int *bio_vec_idx) | |
331 | { | |
8b004457 | 332 | struct bio *clone; |
1da177e4 | 333 | unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
b4e3ca1a | 334 | gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; |
1da177e4 LT |
335 | unsigned int i; |
336 | ||
6a24c718 MB |
337 | if (base_bio) { |
338 | clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs); | |
339 | __bio_clone(clone, base_bio); | |
340 | } else | |
341 | clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); | |
342 | ||
8b004457 | 343 | if (!clone) |
1da177e4 | 344 | return NULL; |
1da177e4 | 345 | |
6a24c718 MB |
346 | clone->bi_destructor = dm_crypt_bio_destructor; |
347 | ||
1da177e4 | 348 | /* if the last bio was not complete, continue where that one ended */ |
8b004457 MB |
349 | clone->bi_idx = *bio_vec_idx; |
350 | clone->bi_vcnt = *bio_vec_idx; | |
351 | clone->bi_size = 0; | |
352 | clone->bi_flags &= ~(1 << BIO_SEG_VALID); | |
1da177e4 | 353 | |
8b004457 MB |
354 | /* clone->bi_idx pages have already been allocated */ |
355 | size -= clone->bi_idx * PAGE_SIZE; | |
1da177e4 | 356 | |
8b004457 MB |
357 | for (i = clone->bi_idx; i < nr_iovecs; i++) { |
358 | struct bio_vec *bv = bio_iovec_idx(clone, i); | |
1da177e4 LT |
359 | |
360 | bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask); | |
361 | if (!bv->bv_page) | |
362 | break; | |
363 | ||
364 | /* | |
365 | * if additional pages cannot be allocated without waiting, | |
366 | * return a partially allocated bio, the caller will then try | |
367 | * to allocate additional bios while submitting this partial bio | |
368 | */ | |
8b004457 | 369 | if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1)) |
1da177e4 LT |
370 | gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; |
371 | ||
372 | bv->bv_offset = 0; | |
373 | if (size > PAGE_SIZE) | |
374 | bv->bv_len = PAGE_SIZE; | |
375 | else | |
376 | bv->bv_len = size; | |
377 | ||
8b004457 MB |
378 | clone->bi_size += bv->bv_len; |
379 | clone->bi_vcnt++; | |
1da177e4 LT |
380 | size -= bv->bv_len; |
381 | } | |
382 | ||
8b004457 MB |
383 | if (!clone->bi_size) { |
384 | bio_put(clone); | |
1da177e4 LT |
385 | return NULL; |
386 | } | |
387 | ||
388 | /* | |
389 | * Remember the last bio_vec allocated to be able | |
390 | * to correctly continue after the splitting. | |
391 | */ | |
8b004457 | 392 | *bio_vec_idx = clone->bi_vcnt; |
1da177e4 | 393 | |
8b004457 | 394 | return clone; |
1da177e4 LT |
395 | } |
396 | ||
397 | static void crypt_free_buffer_pages(struct crypt_config *cc, | |
8b004457 | 398 | struct bio *clone, unsigned int bytes) |
1da177e4 LT |
399 | { |
400 | unsigned int i, start, end; | |
401 | struct bio_vec *bv; | |
402 | ||
403 | /* | |
404 | * This is ugly, but Jens Axboe thinks that using bi_idx in the | |
405 | * endio function is too dangerous at the moment, so I calculate the | |
406 | * correct position using bi_vcnt and bi_size. | |
407 | * The bv_offset and bv_len fields might already be modified but we | |
408 | * know that we always allocated whole pages. | |
409 | * A fix to the bi_idx issue in the kernel is in the works, so | |
410 | * we will hopefully be able to revert to the cleaner solution soon. | |
411 | */ | |
8b004457 MB |
412 | i = clone->bi_vcnt - 1; |
413 | bv = bio_iovec_idx(clone, i); | |
414 | end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - clone->bi_size; | |
1da177e4 LT |
415 | start = end - bytes; |
416 | ||
417 | start >>= PAGE_SHIFT; | |
8b004457 MB |
418 | if (!clone->bi_size) |
419 | end = clone->bi_vcnt; | |
1da177e4 LT |
420 | else |
421 | end >>= PAGE_SHIFT; | |
422 | ||
8b004457 MB |
423 | for (i = start; i < end; i++) { |
424 | bv = bio_iovec_idx(clone, i); | |
1da177e4 LT |
425 | BUG_ON(!bv->bv_page); |
426 | mempool_free(bv->bv_page, cc->page_pool); | |
427 | bv->bv_page = NULL; | |
428 | } | |
429 | } | |
430 | ||
431 | /* | |
432 | * One of the bios was finished. Check for completion of | |
433 | * the whole request and correctly clean up the buffer. | |
434 | */ | |
435 | static void dec_pending(struct crypt_io *io, int error) | |
436 | { | |
437 | struct crypt_config *cc = (struct crypt_config *) io->target->private; | |
438 | ||
439 | if (error < 0) | |
440 | io->error = error; | |
441 | ||
442 | if (!atomic_dec_and_test(&io->pending)) | |
443 | return; | |
444 | ||
445 | if (io->first_clone) | |
446 | bio_put(io->first_clone); | |
447 | ||
8b004457 | 448 | bio_endio(io->base_bio, io->base_bio->bi_size, io->error); |
1da177e4 LT |
449 | |
450 | mempool_free(io, cc->io_pool); | |
451 | } | |
452 | ||
453 | /* | |
454 | * kcryptd: | |
455 | * | |
456 | * Needed because it would be very unwise to do decryption in an | |
23541d2d | 457 | * interrupt context. |
1da177e4 LT |
458 | */ |
459 | static struct workqueue_struct *_kcryptd_workqueue; | |
8b004457 | 460 | static void kcryptd_do_work(void *data); |
1da177e4 | 461 | |
8b004457 | 462 | static void kcryptd_queue_io(struct crypt_io *io) |
1da177e4 | 463 | { |
8b004457 MB |
464 | INIT_WORK(&io->work, kcryptd_do_work, io); |
465 | queue_work(_kcryptd_workqueue, &io->work); | |
466 | } | |
467 | ||
468 | static int crypt_endio(struct bio *clone, unsigned int done, int error) | |
469 | { | |
470 | struct crypt_io *io = clone->bi_private; | |
471 | struct crypt_config *cc = io->target->private; | |
472 | unsigned read_io = bio_data_dir(clone) == READ; | |
473 | ||
474 | /* | |
475 | * free the processed pages, even if | |
476 | * it's only a partially completed write | |
477 | */ | |
478 | if (!read_io) | |
479 | crypt_free_buffer_pages(cc, clone, done); | |
480 | ||
23541d2d | 481 | /* keep going - not finished yet */ |
8b004457 MB |
482 | if (unlikely(clone->bi_size)) |
483 | return 1; | |
484 | ||
8b004457 MB |
485 | if (!read_io) |
486 | goto out; | |
487 | ||
488 | if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) { | |
489 | error = -EIO; | |
490 | goto out; | |
491 | } | |
492 | ||
493 | bio_put(clone); | |
23541d2d | 494 | io->post_process = 1; |
8b004457 MB |
495 | kcryptd_queue_io(io); |
496 | return 0; | |
497 | ||
498 | out: | |
499 | bio_put(clone); | |
500 | dec_pending(io, error); | |
501 | return error; | |
502 | } | |
503 | ||
504 | static void clone_init(struct crypt_io *io, struct bio *clone) | |
505 | { | |
506 | struct crypt_config *cc = io->target->private; | |
507 | ||
508 | clone->bi_private = io; | |
509 | clone->bi_end_io = crypt_endio; | |
510 | clone->bi_bdev = cc->dev->bdev; | |
511 | clone->bi_rw = io->base_bio->bi_rw; | |
512 | } | |
513 | ||
23541d2d | 514 | static void process_read(struct crypt_io *io) |
8b004457 MB |
515 | { |
516 | struct crypt_config *cc = io->target->private; | |
517 | struct bio *base_bio = io->base_bio; | |
518 | struct bio *clone; | |
93e605c2 MB |
519 | sector_t sector = base_bio->bi_sector - io->target->begin; |
520 | ||
521 | atomic_inc(&io->pending); | |
8b004457 MB |
522 | |
523 | /* | |
524 | * The block layer might modify the bvec array, so always | |
525 | * copy the required bvecs because we need the original | |
526 | * one in order to decrypt the whole bio data *afterwards*. | |
527 | */ | |
6a24c718 | 528 | clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs); |
93e605c2 MB |
529 | if (unlikely(!clone)) { |
530 | dec_pending(io, -ENOMEM); | |
23541d2d | 531 | return; |
93e605c2 | 532 | } |
8b004457 MB |
533 | |
534 | clone_init(io, clone); | |
6a24c718 | 535 | clone->bi_destructor = dm_crypt_bio_destructor; |
8b004457 MB |
536 | clone->bi_idx = 0; |
537 | clone->bi_vcnt = bio_segments(base_bio); | |
538 | clone->bi_size = base_bio->bi_size; | |
93e605c2 | 539 | clone->bi_sector = cc->start + sector; |
8b004457 MB |
540 | memcpy(clone->bi_io_vec, bio_iovec(base_bio), |
541 | sizeof(struct bio_vec) * clone->bi_vcnt); | |
8b004457 | 542 | |
93e605c2 | 543 | generic_make_request(clone); |
8b004457 MB |
544 | } |
545 | ||
23541d2d | 546 | static void process_write(struct crypt_io *io) |
8b004457 MB |
547 | { |
548 | struct crypt_config *cc = io->target->private; | |
549 | struct bio *base_bio = io->base_bio; | |
550 | struct bio *clone; | |
93e605c2 MB |
551 | struct convert_context ctx; |
552 | unsigned remaining = base_bio->bi_size; | |
553 | sector_t sector = base_bio->bi_sector - io->target->begin; | |
554 | unsigned bvec_idx = 0; | |
8b004457 | 555 | |
93e605c2 | 556 | atomic_inc(&io->pending); |
8b004457 | 557 | |
93e605c2 | 558 | crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1); |
8b004457 | 559 | |
93e605c2 MB |
560 | /* |
561 | * The allocated buffers can be smaller than the whole bio, | |
562 | * so repeat the whole process until all the data can be handled. | |
563 | */ | |
564 | while (remaining) { | |
565 | clone = crypt_alloc_buffer(cc, base_bio->bi_size, | |
566 | io->first_clone, &bvec_idx); | |
23541d2d MB |
567 | if (unlikely(!clone)) { |
568 | dec_pending(io, -ENOMEM); | |
569 | return; | |
570 | } | |
93e605c2 MB |
571 | |
572 | ctx.bio_out = clone; | |
573 | ||
574 | if (unlikely(crypt_convert(cc, &ctx) < 0)) { | |
575 | crypt_free_buffer_pages(cc, clone, clone->bi_size); | |
576 | bio_put(clone); | |
23541d2d MB |
577 | dec_pending(io, -EIO); |
578 | return; | |
93e605c2 MB |
579 | } |
580 | ||
581 | clone_init(io, clone); | |
582 | clone->bi_sector = cc->start + sector; | |
583 | ||
584 | if (!io->first_clone) { | |
585 | /* | |
586 | * hold a reference to the first clone, because it | |
587 | * holds the bio_vec array and that can't be freed | |
588 | * before all other clones are released | |
589 | */ | |
590 | bio_get(clone); | |
591 | io->first_clone = clone; | |
592 | } | |
593 | ||
93e605c2 MB |
594 | remaining -= clone->bi_size; |
595 | sector += bio_sectors(clone); | |
596 | ||
23541d2d MB |
597 | /* prevent bio_put of first_clone */ |
598 | if (remaining) | |
599 | atomic_inc(&io->pending); | |
600 | ||
93e605c2 MB |
601 | generic_make_request(clone); |
602 | ||
603 | /* out of memory -> run queues */ | |
604 | if (remaining) | |
605 | blk_congestion_wait(bio_data_dir(clone), HZ/100); | |
93e605c2 | 606 | } |
8b004457 MB |
607 | } |
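/*
 * Illustrative note (not part of the original source): the write path
 * encrypts into freshly allocated clone bios before submission; when
 * crypt_alloc_buffer() returns only a partial buffer, that partial clone
 * is submitted, io->pending is bumped, and the loop retries after
 * blk_congestion_wait() until the whole base bio has been covered.
 */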
608 | ||
609 | static void process_read_endio(struct crypt_io *io) | |
610 | { | |
611 | struct crypt_config *cc = io->target->private; | |
1da177e4 | 612 | struct convert_context ctx; |
1da177e4 | 613 | |
8b004457 MB |
614 | crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio, |
615 | io->base_bio->bi_sector - io->target->begin, 0); | |
1da177e4 | 616 | |
8b004457 | 617 | dec_pending(io, crypt_convert(cc, &ctx)); |
1da177e4 LT |
618 | } |
619 | ||
8b004457 | 620 | static void kcryptd_do_work(void *data) |
1da177e4 | 621 | { |
8b004457 MB |
622 | struct crypt_io *io = data; |
623 | ||
23541d2d MB |
624 | if (io->post_process) |
625 | process_read_endio(io); | |
626 | else if (bio_data_dir(io->base_bio) == READ) | |
627 | process_read(io); | |
628 | else | |
629 | process_write(io); | |
1da177e4 LT |
630 | } |
631 | ||
632 | /* | |
633 | * Decode key from its hex representation | |
634 | */ | |
635 | static int crypt_decode_key(u8 *key, char *hex, unsigned int size) | |
636 | { | |
637 | char buffer[3]; | |
638 | char *endp; | |
639 | unsigned int i; | |
640 | ||
641 | buffer[2] = '\0'; | |
642 | ||
8b004457 | 643 | for (i = 0; i < size; i++) { |
1da177e4 LT |
644 | buffer[0] = *hex++; |
645 | buffer[1] = *hex++; | |
646 | ||
647 | key[i] = (u8)simple_strtoul(buffer, &endp, 16); | |
648 | ||
649 | if (endp != &buffer[2]) | |
650 | return -EINVAL; | |
651 | } | |
652 | ||
653 | if (*hex != '\0') | |
654 | return -EINVAL; | |
655 | ||
656 | return 0; | |
657 | } | |
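/*
 * Worked example (illustrative only): for the hex string "cafebabe" and
 * size 4 the result is key[] = { 0xca, 0xfe, 0xba, 0xbe }; a non-hex
 * character stops simple_strtoul() before &buffer[2], and characters left
 * over after 2 * size digits fail the final '\0' check, both returning
 * -EINVAL.
 */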
658 | ||
659 | /* | |
660 | * Encode key into its hex representation | |
661 | */ | |
662 | static void crypt_encode_key(char *hex, u8 *key, unsigned int size) | |
663 | { | |
664 | unsigned int i; | |
665 | ||
8b004457 | 666 | for (i = 0; i < size; i++) { |
1da177e4 LT |
667 | sprintf(hex, "%02x", *key); |
668 | hex += 2; | |
669 | key++; | |
670 | } | |
671 | } | |
672 | ||
e48d4bbf MB |
673 | static int crypt_set_key(struct crypt_config *cc, char *key) |
674 | { | |
675 | unsigned key_size = strlen(key) >> 1; | |
676 | ||
677 | if (cc->key_size && cc->key_size != key_size) | |
678 | return -EINVAL; | |
679 | ||
680 | cc->key_size = key_size; /* initial settings */ | |
681 | ||
682 | if ((!key_size && strcmp(key, "-")) || | |
683 | (key_size && crypt_decode_key(cc->key, key, key_size) < 0)) | |
684 | return -EINVAL; | |
685 | ||
686 | set_bit(DM_CRYPT_KEY_VALID, &cc->flags); | |
687 | ||
688 | return 0; | |
689 | } | |
690 | ||
691 | static int crypt_wipe_key(struct crypt_config *cc) | |
692 | { | |
693 | clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); | |
694 | memset(&cc->key, 0, cc->key_size * sizeof(u8)); | |
695 | return 0; | |
696 | } | |
697 | ||
1da177e4 LT |
698 | /* |
699 | * Construct an encryption mapping: | |
700 | * <cipher> <key> <iv_offset> <dev_path> <start> | |
701 | */ | |
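/*
 * Example table line (illustrative; device, size and key are made up):
 *
 *   0 2097152 crypt aes-cbc-essiv:sha256 <64 hex digits of key> 0 /dev/sdb1 0
 *
 * maps 1 GiB (2097152 sectors) of /dev/sdb1 with AES-CBC, ESSIV(sha256)
 * IV generation, a 256-bit key and iv_offset 0.
 */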
702 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |
703 | { | |
704 | struct crypt_config *cc; | |
d1806f6a | 705 | struct crypto_blkcipher *tfm; |
1da177e4 LT |
706 | char *tmp; |
707 | char *cipher; | |
708 | char *chainmode; | |
709 | char *ivmode; | |
710 | char *ivopts; | |
1da177e4 | 711 | unsigned int key_size; |
4ee218cd | 712 | unsigned long long tmpll; |
1da177e4 LT |
713 | |
714 | if (argc != 5) { | |
72d94861 | 715 | ti->error = "Not enough arguments"; |
1da177e4 LT |
716 | return -EINVAL; |
717 | } | |
718 | ||
719 | tmp = argv[0]; | |
720 | cipher = strsep(&tmp, "-"); | |
721 | chainmode = strsep(&tmp, "-"); | |
722 | ivopts = strsep(&tmp, "-"); | |
723 | ivmode = strsep(&ivopts, ":"); | |
724 | ||
725 | if (tmp) | |
72d94861 | 726 | DMWARN("Unexpected additional cipher options"); |
1da177e4 LT |
727 | |
728 | key_size = strlen(argv[1]) >> 1; | |
729 | ||
e48d4bbf | 730 | cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); |
1da177e4 LT |
731 | if (cc == NULL) { |
732 | ti->error = | |
72d94861 | 733 | "Cannot allocate transparent encryption context"; |
1da177e4 LT |
734 | return -ENOMEM; |
735 | } | |
736 | ||
e48d4bbf | 737 | if (crypt_set_key(cc, argv[1])) { |
72d94861 | 738 | ti->error = "Error decoding key"; |
1da177e4 LT |
739 | goto bad1; |
740 | } | |
741 | ||
742 | /* Compatibility mode for old dm-crypt cipher strings */ |
743 | if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) { | |
744 | chainmode = "cbc"; | |
745 | ivmode = "plain"; | |
746 | } | |
747 | ||
d1806f6a HX |
748 | if (strcmp(chainmode, "ecb") && !ivmode) { |
749 | ti->error = "This chaining mode requires an IV mechanism"; | |
1da177e4 LT |
750 | goto bad1; |
751 | } | |
752 | ||
d1806f6a HX |
753 | if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode, |
754 | cipher) >= CRYPTO_MAX_ALG_NAME) { | |
755 | ti->error = "Chain mode + cipher name is too long"; | |
1da177e4 LT |
756 | goto bad1; |
757 | } | |
758 | ||
d1806f6a HX |
759 | tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); |
760 | if (IS_ERR(tfm)) { | |
72d94861 | 761 | ti->error = "Error allocating crypto tfm"; |
1da177e4 LT |
762 | goto bad1; |
763 | } | |
1da177e4 | 764 | |
d1806f6a HX |
765 | strcpy(cc->cipher, cipher); |
766 | strcpy(cc->chainmode, chainmode); | |
1da177e4 LT |
767 | cc->tfm = tfm; |
768 | ||
769 | /* | |
770 | * Choose ivmode. Valid modes: "plain", "essiv:<esshash>". | |
771 | * See comments at iv code | |
772 | */ | |
773 | ||
774 | if (ivmode == NULL) | |
775 | cc->iv_gen_ops = NULL; | |
776 | else if (strcmp(ivmode, "plain") == 0) | |
777 | cc->iv_gen_ops = &crypt_iv_plain_ops; | |
778 | else if (strcmp(ivmode, "essiv") == 0) | |
779 | cc->iv_gen_ops = &crypt_iv_essiv_ops; | |
780 | else { | |
72d94861 | 781 | ti->error = "Invalid IV mode"; |
1da177e4 LT |
782 | goto bad2; |
783 | } | |
784 | ||
785 | if (cc->iv_gen_ops && cc->iv_gen_ops->ctr && | |
786 | cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) | |
787 | goto bad2; | |
788 | ||
d1806f6a HX |
789 | cc->iv_size = crypto_blkcipher_ivsize(tfm); |
790 | if (cc->iv_size) | |
1da177e4 | 791 | /* at least a 64 bit sector number should fit in our buffer */ |
d1806f6a | 792 | cc->iv_size = max(cc->iv_size, |
1da177e4 LT |
793 | (unsigned int)(sizeof(u64) / sizeof(u8))); |
794 | else { | |
1da177e4 | 795 | if (cc->iv_gen_ops) { |
72d94861 | 796 | DMWARN("Selected cipher does not support IVs"); |
1da177e4 LT |
797 | if (cc->iv_gen_ops->dtr) |
798 | cc->iv_gen_ops->dtr(cc); | |
799 | cc->iv_gen_ops = NULL; | |
800 | } | |
801 | } | |
802 | ||
93d2341c | 803 | cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); |
1da177e4 | 804 | if (!cc->io_pool) { |
72d94861 | 805 | ti->error = "Cannot allocate crypt io mempool"; |
1da177e4 LT |
806 | goto bad3; |
807 | } | |
808 | ||
a19b27ce | 809 | cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); |
1da177e4 | 810 | if (!cc->page_pool) { |
72d94861 | 811 | ti->error = "Cannot allocate page mempool"; |
1da177e4 LT |
812 | goto bad4; |
813 | } | |
814 | ||
6a24c718 MB |
815 | cc->bs = bioset_create(MIN_IOS, MIN_IOS, 4); |
816 | if (!cc->bs) { | |
817 | ti->error = "Cannot allocate crypt bioset"; | |
818 | goto bad_bs; | |
819 | } | |
820 | ||
d1806f6a | 821 | if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) { |
72d94861 | 822 | ti->error = "Error setting key"; |
1da177e4 LT |
823 | goto bad5; |
824 | } | |
825 | ||
4ee218cd | 826 | if (sscanf(argv[2], "%llu", &tmpll) != 1) { |
72d94861 | 827 | ti->error = "Invalid iv_offset sector"; |
1da177e4 LT |
828 | goto bad5; |
829 | } | |
4ee218cd | 830 | cc->iv_offset = tmpll; |
1da177e4 | 831 | |
4ee218cd | 832 | if (sscanf(argv[4], "%llu", &tmpll) != 1) { |
72d94861 | 833 | ti->error = "Invalid device sector"; |
1da177e4 LT |
834 | goto bad5; |
835 | } | |
4ee218cd | 836 | cc->start = tmpll; |
1da177e4 LT |
837 | |
838 | if (dm_get_device(ti, argv[3], cc->start, ti->len, | |
839 | dm_table_get_mode(ti->table), &cc->dev)) { | |
72d94861 | 840 | ti->error = "Device lookup failed"; |
1da177e4 LT |
841 | goto bad5; |
842 | } | |
843 | ||
844 | if (ivmode && cc->iv_gen_ops) { | |
845 | if (ivopts) | |
846 | *(ivopts - 1) = ':'; | |
847 | cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL); | |
848 | if (!cc->iv_mode) { | |
72d94861 | 849 | ti->error = "Error kmallocing iv_mode string"; |
1da177e4 LT |
850 | goto bad5; |
851 | } | |
852 | strcpy(cc->iv_mode, ivmode); | |
853 | } else | |
854 | cc->iv_mode = NULL; | |
855 | ||
856 | ti->private = cc; | |
857 | return 0; | |
858 | ||
859 | bad5: | |
6a24c718 MB |
860 | bioset_free(cc->bs); |
861 | bad_bs: | |
1da177e4 LT |
862 | mempool_destroy(cc->page_pool); |
863 | bad4: | |
864 | mempool_destroy(cc->io_pool); | |
865 | bad3: | |
866 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) | |
867 | cc->iv_gen_ops->dtr(cc); | |
868 | bad2: | |
d1806f6a | 869 | crypto_free_blkcipher(tfm); |
1da177e4 | 870 | bad1: |
9d3520a3 SR |
871 | /* Must zero key material before freeing */ |
872 | memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); | |
1da177e4 LT |
873 | kfree(cc); |
874 | return -EINVAL; | |
875 | } | |
876 | ||
877 | static void crypt_dtr(struct dm_target *ti) | |
878 | { | |
879 | struct crypt_config *cc = (struct crypt_config *) ti->private; | |
880 | ||
6a24c718 | 881 | bioset_free(cc->bs); |
1da177e4 LT |
882 | mempool_destroy(cc->page_pool); |
883 | mempool_destroy(cc->io_pool); | |
884 | ||
990a8baf | 885 | kfree(cc->iv_mode); |
1da177e4 LT |
886 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) |
887 | cc->iv_gen_ops->dtr(cc); | |
d1806f6a | 888 | crypto_free_blkcipher(cc->tfm); |
1da177e4 | 889 | dm_put_device(ti, cc->dev); |
9d3520a3 SR |
890 | |
891 | /* Must zero key material before freeing */ | |
892 | memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); | |
1da177e4 LT |
893 | kfree(cc); |
894 | } | |
895 | ||
1da177e4 LT |
896 | static int crypt_map(struct dm_target *ti, struct bio *bio, |
897 | union map_info *map_context) | |
898 | { | |
8b004457 | 899 | struct crypt_config *cc = ti->private; |
e48d4bbf | 900 | struct crypt_io *io; |
1da177e4 | 901 | |
e48d4bbf | 902 | io = mempool_alloc(cc->io_pool, GFP_NOIO); |
1da177e4 | 903 | io->target = ti; |
8b004457 | 904 | io->base_bio = bio; |
1da177e4 | 905 | io->first_clone = NULL; |
23541d2d | 906 | io->error = io->post_process = 0; |
93e605c2 | 907 | atomic_set(&io->pending, 0); |
23541d2d | 908 | kcryptd_queue_io(io); |
1da177e4 | 909 | |
23541d2d | 910 | return 0; |
1da177e4 LT |
911 | } |
912 | ||
913 | static int crypt_status(struct dm_target *ti, status_type_t type, | |
914 | char *result, unsigned int maxlen) | |
915 | { | |
916 | struct crypt_config *cc = (struct crypt_config *) ti->private; | |
917 | const char *cipher; | |
918 | const char *chainmode = NULL; | |
919 | unsigned int sz = 0; | |
920 | ||
921 | switch (type) { | |
922 | case STATUSTYPE_INFO: | |
923 | result[0] = '\0'; | |
924 | break; | |
925 | ||
926 | case STATUSTYPE_TABLE: | |
d1806f6a | 927 | cipher = crypto_blkcipher_name(cc->tfm); |
1da177e4 | 928 | |
d1806f6a | 929 | chainmode = cc->chainmode; |
1da177e4 LT |
930 | |
931 | if (cc->iv_mode) | |
932 | DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode); | |
933 | else | |
934 | DMEMIT("%s-%s ", cipher, chainmode); | |
935 | ||
936 | if (cc->key_size > 0) { | |
937 | if ((maxlen - sz) < ((cc->key_size << 1) + 1)) | |
938 | return -ENOMEM; | |
939 | ||
940 | crypt_encode_key(result + sz, cc->key, cc->key_size); | |
941 | sz += cc->key_size << 1; | |
942 | } else { | |
943 | if (sz >= maxlen) | |
944 | return -ENOMEM; | |
945 | result[sz++] = '-'; | |
946 | } | |
947 | ||
4ee218cd AM |
948 | DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, |
949 | cc->dev->name, (unsigned long long)cc->start); | |
1da177e4 LT |
950 | break; |
951 | } | |
952 | return 0; | |
953 | } | |
954 | ||
e48d4bbf MB |
955 | static void crypt_postsuspend(struct dm_target *ti) |
956 | { | |
957 | struct crypt_config *cc = ti->private; | |
958 | ||
959 | set_bit(DM_CRYPT_SUSPENDED, &cc->flags); | |
960 | } | |
961 | ||
962 | static int crypt_preresume(struct dm_target *ti) | |
963 | { | |
964 | struct crypt_config *cc = ti->private; | |
965 | ||
966 | if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) { | |
967 | DMERR("aborting resume - crypt key is not set."); | |
968 | return -EAGAIN; | |
969 | } | |
970 | ||
971 | return 0; | |
972 | } | |
973 | ||
974 | static void crypt_resume(struct dm_target *ti) | |
975 | { | |
976 | struct crypt_config *cc = ti->private; | |
977 | ||
978 | clear_bit(DM_CRYPT_SUSPENDED, &cc->flags); | |
979 | } | |
980 | ||
981 | /* Message interface | |
982 | * key set <key> | |
983 | * key wipe | |
984 | */ | |
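/*
 * Usage sketch (illustrative, assuming the standard dmsetup tool): with the
 * device suspended, "dmsetup message <name> 0 key wipe" clears the key and
 * "dmsetup message <name> 0 key set <hex key>" installs a new one; both are
 * rejected below unless DM_CRYPT_SUSPENDED is set.
 */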
985 | static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) | |
986 | { | |
987 | struct crypt_config *cc = ti->private; | |
988 | ||
989 | if (argc < 2) | |
990 | goto error; | |
991 | ||
992 | if (!strnicmp(argv[0], MESG_STR("key"))) { | |
993 | if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { | |
994 | DMWARN("not suspended during key manipulation."); | |
995 | return -EINVAL; | |
996 | } | |
997 | if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) | |
998 | return crypt_set_key(cc, argv[2]); | |
999 | if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) | |
1000 | return crypt_wipe_key(cc); | |
1001 | } | |
1002 | ||
1003 | error: | |
1004 | DMWARN("unrecognised message received."); | |
1005 | return -EINVAL; | |
1006 | } | |
1007 | ||
1da177e4 LT |
1008 | static struct target_type crypt_target = { |
1009 | .name = "crypt", | |
23541d2d | 1010 | .version= {1, 3, 0}, |
1da177e4 LT |
1011 | .module = THIS_MODULE, |
1012 | .ctr = crypt_ctr, | |
1013 | .dtr = crypt_dtr, | |
1014 | .map = crypt_map, | |
1015 | .status = crypt_status, | |
e48d4bbf MB |
1016 | .postsuspend = crypt_postsuspend, |
1017 | .preresume = crypt_preresume, | |
1018 | .resume = crypt_resume, | |
1019 | .message = crypt_message, | |
1da177e4 LT |
1020 | }; |
1021 | ||
1022 | static int __init dm_crypt_init(void) | |
1023 | { | |
1024 | int r; | |
1025 | ||
1026 | _crypt_io_pool = kmem_cache_create("dm-crypt_io", | |
1027 | sizeof(struct crypt_io), | |
1028 | 0, 0, NULL, NULL); | |
1029 | if (!_crypt_io_pool) | |
1030 | return -ENOMEM; | |
1031 | ||
1032 | _kcryptd_workqueue = create_workqueue("kcryptd"); | |
1033 | if (!_kcryptd_workqueue) { | |
1034 | r = -ENOMEM; | |
72d94861 | 1035 | DMERR("couldn't create kcryptd"); |
1da177e4 LT |
1036 | goto bad1; |
1037 | } | |
1038 | ||
1039 | r = dm_register_target(&crypt_target); | |
1040 | if (r < 0) { | |
72d94861 | 1041 | DMERR("register failed %d", r); |
1da177e4 LT |
1042 | goto bad2; |
1043 | } | |
1044 | ||
1045 | return 0; | |
1046 | ||
1047 | bad2: | |
1048 | destroy_workqueue(_kcryptd_workqueue); | |
1049 | bad1: | |
1050 | kmem_cache_destroy(_crypt_io_pool); | |
1051 | return r; | |
1052 | } | |
1053 | ||
1054 | static void __exit dm_crypt_exit(void) | |
1055 | { | |
1056 | int r = dm_unregister_target(&crypt_target); | |
1057 | ||
1058 | if (r < 0) | |
72d94861 | 1059 | DMERR("unregister failed %d", r); |
1da177e4 LT |
1060 | |
1061 | destroy_workqueue(_kcryptd_workqueue); | |
1062 | kmem_cache_destroy(_crypt_io_pool); | |
1063 | } | |
1064 | ||
1065 | module_init(dm_crypt_init); | |
1066 | module_exit(dm_crypt_exit); | |
1067 | ||
1068 | MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); | |
1069 | MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); | |
1070 | MODULE_LICENSE("GPL"); |