/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *        Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *        Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
                 "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
                 "Number of crypto contexts to preallocate");

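/*
 * The bounce-page mempool and the free-context list below are
 * preallocated so that the write path can keep making forward
 * progress even when the normal allocator is under memory pressure.
 */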
static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

static struct kmem_cache *ext4_crypto_ctx_cachep;
struct kmem_cache *ext4_crypt_info_cachep;

/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
        unsigned long flags;

        if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page) {
                if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
                        __free_page(ctx->w.bounce_page);
                else
                        mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
        }
        ctx->w.bounce_page = NULL;
        ctx->w.control_page = NULL;
        if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
                kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
        } else {
                spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
                list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
                spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
        }
}

/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR() value otherwise. This function never returns NULL.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
{
        struct ext4_crypto_ctx *ctx = NULL;
        int res = 0;
        unsigned long flags;
        struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

        BUG_ON(ci == NULL);

        /*
         * We first try getting the ctx from a free list because in
         * the common case the ctx will have an allocated and
         * initialized crypto tfm, so it's probably a worthwhile
         * optimization. For the bounce page, we first try getting it
         * from the kernel allocator because that's just about as fast
         * as getting it from a list and because a cache of free pages
         * should generally be a "last resort" option for a filesystem
         * to be able to do its job.
         */
        spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
        ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
                                       struct ext4_crypto_ctx, free_list);
        if (ctx)
                list_del(&ctx->free_list);
        spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
        if (!ctx) {
                ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
                if (!ctx) {
                        res = -ENOMEM;
                        goto out;
                }
                ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
        } else {
                ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
        }
        ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
        if (res) {
                if (!IS_ERR_OR_NULL(ctx))
                        ext4_release_crypto_ctx(ctx);
                ctx = ERR_PTR(res);
        }
        return ctx;
}

struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);

/**
 * ext4_exit_crypto() - Shutdown the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
        struct ext4_crypto_ctx *pos, *n;

        list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
                kmem_cache_free(ext4_crypto_ctx_cachep, pos);
        INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
        if (ext4_bounce_page_pool)
                mempool_destroy(ext4_bounce_page_pool);
        ext4_bounce_page_pool = NULL;
        if (ext4_read_workqueue)
                destroy_workqueue(ext4_read_workqueue);
        ext4_read_workqueue = NULL;
        if (ext4_crypto_ctx_cachep)
                kmem_cache_destroy(ext4_crypto_ctx_cachep);
        ext4_crypto_ctx_cachep = NULL;
        if (ext4_crypt_info_cachep)
                kmem_cache_destroy(ext4_crypt_info_cachep);
        ext4_crypt_info_cachep = NULL;
}

/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
        int i, res = -ENOMEM;

        mutex_lock(&crypto_init);
        if (ext4_read_workqueue)
                goto already_initialized;
        ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
        if (!ext4_read_workqueue)
                goto fail;

        ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
                                            SLAB_RECLAIM_ACCOUNT);
        if (!ext4_crypto_ctx_cachep)
                goto fail;

        ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
                                            SLAB_RECLAIM_ACCOUNT);
        if (!ext4_crypt_info_cachep)
                goto fail;

        for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
                struct ext4_crypto_ctx *ctx;

                ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
                if (!ctx) {
                        res = -ENOMEM;
                        goto fail;
                }
                list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
        }

        ext4_bounce_page_pool =
                mempool_create_page_pool(num_prealloc_crypto_pages, 0);
        if (!ext4_bounce_page_pool) {
                res = -ENOMEM;
                goto fail;
        }
already_initialized:
        mutex_unlock(&crypto_init);
        return 0;
fail:
        ext4_exit_crypto();
        mutex_unlock(&crypto_init);
        return res;
}

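/**
 * ext4_restore_control_page() - Unwinds the effects of ext4_encrypt()
 * @data_page: The ciphertext page returned by ext4_encrypt()
 *
 * Detaches the encryption context stashed in the page's private field,
 * unlocks the page, and releases the context (which in turn frees the
 * bounce page itself).
 */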
void ext4_restore_control_page(struct page *data_page)
{
        struct ext4_crypto_ctx *ctx =
                (struct ext4_crypto_ctx *)page_private(data_page);

        set_page_private(data_page, (unsigned long)NULL);
        ClearPagePrivate(data_page);
        unlock_page(data_page);
        ext4_release_crypto_ctx(ctx);
}

/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
        struct ext4_completion_result *ecr = req->data;

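        /*
         * -EINPROGRESS here only means that a previously backlogged
         * request has now been queued for processing; the callback
         * will run again with the final result.
         */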
        if (res == -EINPROGRESS)
                return;
        ecr->res = res;
        complete(&ecr->completion);
}

typedef enum {
        EXT4_DECRYPT = 0,
        EXT4_ENCRYPT,
} ext4_direction_t;

static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
                            struct inode *inode,
                            ext4_direction_t rw,
                            pgoff_t index,
                            struct page *src_page,
                            struct page *dest_page)
{
        u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
        struct ablkcipher_request *req = NULL;
        DECLARE_EXT4_COMPLETION_RESULT(ecr);
        struct scatterlist dst, src;
        struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
        struct crypto_ablkcipher *tfm = ci->ci_ctfm;
        int res = 0;

        req = ablkcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                                   "%s: ablkcipher_request_alloc() failed\n",
                                   __func__);
                return -ENOMEM;
        }
        ablkcipher_request_set_callback(
                req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                ext4_crypt_complete, &ecr);

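        /*
         * The XTS tweak is simply the page's index within the file,
         * zero-padded to EXT4_XTS_TWEAK_SIZE bytes, so every page in
         * the file is encrypted under a distinct tweak.
         */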
        BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
        memcpy(xts_tweak, &index, sizeof(index));
        memset(&xts_tweak[sizeof(index)], 0,
               EXT4_XTS_TWEAK_SIZE - sizeof(index));

        sg_init_table(&dst, 1);
        sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
        sg_init_table(&src, 1);
        sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
        ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
                                     xts_tweak);
        if (rw == EXT4_DECRYPT)
                res = crypto_ablkcipher_decrypt(req);
        else
                res = crypto_ablkcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                BUG_ON(req->base.data != &ecr);
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
        ablkcipher_request_free(req);
        if (res) {
                printk_ratelimited(
                        KERN_ERR
                        "%s: crypto_ablkcipher_encrypt() returned %d\n",
                        __func__, res);
                return res;
        }
        return 0;
}

/**
 * ext4_encrypt() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the
 * ctx encryption context.
 *
 * Called on the page write path. The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success; an
 * ERR_PTR() value otherwise.
 */
struct page *ext4_encrypt(struct inode *inode,
                          struct page *plaintext_page)
{
        struct ext4_crypto_ctx *ctx;
        struct page *ciphertext_page = NULL;
        int err;

        BUG_ON(!PageLocked(plaintext_page));

        ctx = ext4_get_crypto_ctx(inode);
        if (IS_ERR(ctx))
                return (struct page *) ctx;

        /* The encryption operation will require a bounce page. */
        ciphertext_page = alloc_page(GFP_NOFS);
        if (!ciphertext_page) {
                /* This is a potential bottleneck, but at least we'll have
                 * forward progress. */
                ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
                                                GFP_NOFS);
                if (WARN_ON_ONCE(!ciphertext_page)) {
                        ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
                                                        GFP_NOFS | __GFP_WAIT);
                }
                ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
        } else {
                ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
        }
        ctx->flags |= EXT4_WRITE_PATH_FL;
        ctx->w.bounce_page = ciphertext_page;
        ctx->w.control_page = plaintext_page;
        err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
                               plaintext_page, ciphertext_page);
        if (err) {
                ext4_release_crypto_ctx(ctx);
                return ERR_PTR(err);
        }
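        /*
         * Stash the context in the ciphertext page's private field so
         * that ext4_restore_control_page() can find and release it
         * once the write has completed.
         */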
        SetPagePrivate(ciphertext_page);
        set_page_private(ciphertext_page, (unsigned long)ctx);
        lock_page(ciphertext_page);
        return ciphertext_page;
}

/**
 * ext4_decrypt() - Decrypts a page in-place
 * @ctx: The encryption context.
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
{
        BUG_ON(!PageLocked(page));

        return ext4_page_crypto(ctx, page->mapping->host,
                                EXT4_DECRYPT, page->index, page, page);
}

/*
 * Convenience function which takes care of allocating and
 * deallocating the encryption context
 */
int ext4_decrypt_one(struct inode *inode, struct page *page)
{
        int ret;
        struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);

        /*
         * ext4_get_crypto_ctx() never returns NULL; check for an
         * error pointer instead.
         */
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
        ret = ext4_decrypt(ctx, page);
        ext4_release_crypto_ctx(ctx);
        return ret;
}

int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
        struct ext4_crypto_ctx *ctx;
        struct page *ciphertext_page = NULL;
        struct bio *bio;
        ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
        ext4_fsblk_t pblk = ext4_ext_pblock(ex);
        unsigned int len = ext4_ext_get_actual_len(ex);
        int err = 0;

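        /*
         * One full page is encrypted and written per block below, so
         * this path only supports block size == page size.
         */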
        BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

        ctx = ext4_get_crypto_ctx(inode);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ciphertext_page = alloc_page(GFP_NOFS);
        if (!ciphertext_page) {
                /* This is a potential bottleneck, but at least we'll have
                 * forward progress. */
                ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
                                                GFP_NOFS);
                if (WARN_ON_ONCE(!ciphertext_page)) {
                        ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
                                                        GFP_NOFS | __GFP_WAIT);
                }
                ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
        } else {
                ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
        }
        ctx->w.bounce_page = ciphertext_page;

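        /*
         * For each block in the extent, encrypt a page of zeroes using
         * the block's logical number as the XTS tweak and synchronously
         * write the result to the matching physical block.
         */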
        while (len--) {
                err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
                                       ZERO_PAGE(0), ciphertext_page);
                if (err)
                        goto errout;

                bio = bio_alloc(GFP_KERNEL, 1);
                if (!bio) {
                        err = -ENOMEM;
                        goto errout;
                }
                bio->bi_bdev = inode->i_sb->s_bdev;
                /* bi_sector is in 512-byte sectors, not fs blocks */
                bio->bi_iter.bi_sector =
                        pblk << (inode->i_sb->s_blocksize_bits - 9);
                /*
                 * bio_add_page() returns the number of bytes added,
                 * not an error code, so check for a short add.
                 */
                if (bio_add_page(bio, ciphertext_page,
                                 inode->i_sb->s_blocksize, 0) !=
                    inode->i_sb->s_blocksize) {
                        bio_put(bio);
                        err = -EIO;
                        goto errout;
                }
                err = submit_bio_wait(WRITE, bio);
                /* drop our reference now that the synchronous I/O is done */
                bio_put(bio);
                if (err)
                        goto errout;
                lblk++;
                pblk++;
        }
        err = 0;
errout:
        ext4_release_crypto_ctx(ctx);
        return err;
}

bool ext4_valid_contents_enc_mode(uint32_t mode)
{
        return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
        if (size == ext4_encryption_key_size(mode))
                return size;
        return 0;
}