/*
 * linux/fs/f2fs/crypto.c
 *
 * Copied from linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * This contains encryption functions for f2fs
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Remove ext4_encrypted_zeroout(),
 *	add f2fs_restore_and_release_control_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>
#include <linux/f2fs_fs.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>

#include "f2fs.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *f2fs_bounce_page_pool;

static LIST_HEAD(f2fs_free_crypto_ctxs);
static DEFINE_SPINLOCK(f2fs_crypto_ctx_lock);

struct workqueue_struct *f2fs_read_workqueue;
static DEFINE_MUTEX(crypto_init);

/**
 * f2fs_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->bounce_page) {
		if (ctx->flags & F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
			__free_page(ctx->bounce_page);
		else
			mempool_free(ctx->bounce_page, f2fs_bounce_page_pool);
		ctx->bounce_page = NULL;
	}
	ctx->control_page = NULL;
	if (ctx->flags & F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		if (ctx->tfm)
			crypto_free_tfm(ctx->tfm);
		kfree(ctx);
	} else {
		spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
		spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
	}
}

/**
 * f2fs_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context
 * @mask: The allocation mask.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR otherwise.
 */
static struct f2fs_crypto_ctx *f2fs_alloc_and_init_crypto_ctx(gfp_t mask)
{
	struct f2fs_crypto_ctx *ctx = kzalloc(sizeof(struct f2fs_crypto_ctx),
						mask);

	if (!ctx)
		return ERR_PTR(-ENOMEM);
	return ctx;
}

/**
 * f2fs_get_crypto_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR otherwise.
 */
struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *inode)
{
	struct f2fs_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-EACCES);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&f2fs_free_crypto_ctxs,
					struct f2fs_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = f2fs_alloc_and_init_crypto_ctx(GFP_NOFS);
		if (IS_ERR(ctx)) {
			res = PTR_ERR(ctx);
			goto out;
		}
		ctx->flags |= F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}

	/*
	 * Allocate a new Crypto API context if we don't already have
	 * one or if it isn't the right mode.
	 */
	BUG_ON(ci->ci_mode == F2FS_ENCRYPTION_MODE_INVALID);
	if (ctx->tfm && (ctx->mode != ci->ci_mode)) {
		crypto_free_tfm(ctx->tfm);
		ctx->tfm = NULL;
		ctx->mode = F2FS_ENCRYPTION_MODE_INVALID;
	}
	if (!ctx->tfm) {
		switch (ci->ci_mode) {
		case F2FS_ENCRYPTION_MODE_AES_256_XTS:
			ctx->tfm = crypto_ablkcipher_tfm(
				crypto_alloc_ablkcipher("xts(aes)", 0, 0));
			break;
		case F2FS_ENCRYPTION_MODE_AES_256_GCM:
			/*
			 * TODO(mhalcrow): AEAD w/ gcm(aes);
			 * crypto_aead_setauthsize()
			 */
			ctx->tfm = ERR_PTR(-ENOTSUPP);
			break;
		default:
			BUG();
		}
		if (IS_ERR_OR_NULL(ctx->tfm)) {
			res = PTR_ERR(ctx->tfm);
			ctx->tfm = NULL;
			goto out;
		}
		ctx->mode = ci->ci_mode;
	}
	BUG_ON(ci->ci_size != f2fs_encryption_key_size(ci->ci_mode));

	/*
	 * There shouldn't be a bounce page attached to the crypto
	 * context at this point.
	 */
	BUG_ON(ctx->bounce_page);

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			f2fs_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}

/*
 * Call f2fs_decrypt on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct f2fs_crypto_ctx *ctx =
		container_of(work, struct f2fs_crypto_ctx, work);
	struct bio *bio = ctx->bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = f2fs_decrypt(ctx, page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else
			SetPageUptodate(page);
		unlock_page(page);
	}
	f2fs_release_crypto_ctx(ctx);
	bio_put(bio);
}

void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->work, completion_pages);
	ctx->bio = bio;
	queue_work(f2fs_read_workqueue, &ctx->work);
}

/**
 * f2fs_exit_crypto() - Shutdown the f2fs encryption system
 */
void f2fs_exit_crypto(void)
{
	struct f2fs_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &f2fs_free_crypto_ctxs, free_list) {
		if (pos->bounce_page) {
			if (pos->flags &
				F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
				__free_page(pos->bounce_page);
			else
				mempool_free(pos->bounce_page,
						f2fs_bounce_page_pool);
		}
		if (pos->tfm)
			crypto_free_tfm(pos->tfm);
		kfree(pos);
	}
	INIT_LIST_HEAD(&f2fs_free_crypto_ctxs);
	if (f2fs_bounce_page_pool)
		mempool_destroy(f2fs_bounce_page_pool);
	f2fs_bounce_page_pool = NULL;
	if (f2fs_read_workqueue)
		destroy_workqueue(f2fs_read_workqueue);
	f2fs_read_workqueue = NULL;
}

/**
 * f2fs_init_crypto() - Set up for f2fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int f2fs_init_crypto(void)
{
	int i, res;

	mutex_lock(&crypto_init);
	if (f2fs_read_workqueue)
		goto already_initialized;

	f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0);
	if (!f2fs_read_workqueue) {
		res = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct f2fs_crypto_ctx *ctx;

		ctx = f2fs_alloc_and_init_crypto_ctx(GFP_KERNEL);
		if (IS_ERR(ctx)) {
			res = PTR_ERR(ctx);
			goto fail;
		}
		list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
	}

	f2fs_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!f2fs_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	f2fs_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}
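
/*
 * Illustrative call pattern only (hypothetical helper, not part of this
 * file): a path about to touch encrypted data would lazily bring the
 * crypto machinery up before taking a context. f2fs_init_crypto() may be
 * called repeatedly; once f2fs_read_workqueue exists, later calls return
 * 0 without allocating anything further.
 */
#if 0
static struct f2fs_crypto_ctx *example_get_ctx(struct inode *inode)
{
	int err = f2fs_init_crypto();

	if (err)
		return ERR_PTR(err);
	return f2fs_get_crypto_ctx(inode);
}
#endif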

void f2fs_restore_and_release_control_page(struct page **page)
{
	struct f2fs_crypto_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct f2fs_crypto_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->control_page;

	f2fs_restore_control_page(bounce_page);
}

void f2fs_restore_control_page(struct page *data_page)
{
	struct f2fs_crypto_ctx *ctx =
		(struct f2fs_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	f2fs_release_crypto_ctx(ctx);
}

/**
 * f2fs_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void f2fs_crypt_complete(struct crypto_async_request *req, int res)
{
	struct f2fs_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	F2FS_DECRYPT = 0,
	F2FS_ENCRYPT,
} f2fs_direction_t;

static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
				struct inode *inode,
				f2fs_direction_t rw,
				pgoff_t index,
				struct page *src_page,
				struct page *dest_page)
{
	u8 xts_tweak[F2FS_XTS_TWEAK_SIZE];
	struct ablkcipher_request *req = NULL;
	DECLARE_F2FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
	int res = 0;

	BUG_ON(!ctx->tfm);
	BUG_ON(ctx->mode != fi->i_crypt_info->ci_mode);

	if (ctx->mode != F2FS_ENCRYPTION_MODE_AES_256_XTS) {
		printk_ratelimited(KERN_ERR
				"%s: unsupported crypto algorithm: %d\n",
				__func__, ctx->mode);
		return -ENOTSUPP;
	}

	crypto_ablkcipher_clear_flags(atfm, ~0);
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);

	res = crypto_ablkcipher_setkey(atfm, fi->i_crypt_info->ci_raw,
					fi->i_crypt_info->ci_size);
	if (res) {
		printk_ratelimited(KERN_ERR
				"%s: crypto_ablkcipher_setkey() failed\n",
				__func__);
		return res;
	}
	req = ablkcipher_request_alloc(atfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: ablkcipher_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		f2fs_crypt_complete, &ecr);

	BUILD_BUG_ON(F2FS_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
		F2FS_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
					xts_tweak);
	if (rw == F2FS_DECRYPT)
		res = crypto_ablkcipher_decrypt(req);
	else
		res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_ablkcipher_encrypt/decrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}
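
/*
 * Tweak layout note (illustrative; assumes F2FS_XTS_TWEAK_SIZE is 16 and
 * an 8-byte pgoff_t): the page index is copied in native byte order into
 * the low bytes of the tweak and the rest is zero-filled, so on a
 * little-endian machine page index 5 yields
 *
 *	05 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 *
 * i.e. every page of a file is encrypted with a distinct XTS tweak
 * derived only from its index within that file.
 */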

/**
 * f2fs_encrypt() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path. The caller must call
 * f2fs_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success; an
 * ERR_PTR otherwise.
 */
struct page *f2fs_encrypt(struct inode *inode,
			struct page *plaintext_page)
{
	struct f2fs_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = f2fs_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_page(GFP_NOFS);
	if (!ciphertext_page) {
		/*
		 * This is a potential bottleneck, but at least we'll have
		 * forward progress.
		 */
		ciphertext_page = mempool_alloc(f2fs_bounce_page_pool,
						GFP_NOFS);
		if (WARN_ON_ONCE(!ciphertext_page))
			ciphertext_page = mempool_alloc(f2fs_bounce_page_pool,
						GFP_NOFS | __GFP_WAIT);
		ctx->flags &= ~F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags |= F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->bounce_page = ciphertext_page;
	ctx->control_page = plaintext_page;
	err = f2fs_page_crypto(ctx, inode, F2FS_ENCRYPT, plaintext_page->index,
					plaintext_page, ciphertext_page);
	if (err) {
		f2fs_release_crypto_ctx(ctx);
		return ERR_PTR(err);
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}
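
/*
 * Minimal write-path sketch (hypothetical caller, not part of this file):
 * the bounce page returned by f2fs_encrypt() is what actually goes to
 * disk; once the write is done, f2fs_restore_control_page() unlocks the
 * bounce page and releases the encryption context, which frees the bounce
 * page with it. submit_encrypted_page() is an assumed stand-in for the
 * real bio submission done elsewhere in f2fs and is treated as
 * synchronous here.
 */
#if 0
static int example_writepage(struct inode *inode, struct page *page)
{
	struct page *cpage;
	int err;

	cpage = f2fs_encrypt(inode, page);
	if (IS_ERR(cpage))
		return PTR_ERR(cpage);

	err = submit_encrypted_page(cpage);	/* assumed helper */

	f2fs_restore_control_page(cpage);
	return err;
}
#endif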

/**
 * f2fs_decrypt() - Decrypts a page in-place
 * @ctx: The encryption context.
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int f2fs_decrypt(struct f2fs_crypto_ctx *ctx, struct page *page)
{
	BUG_ON(!PageLocked(page));

	return f2fs_page_crypto(ctx, page->mapping->host,
				F2FS_DECRYPT, page->index, page, page);
}

/*
 * Convenience function which takes care of allocating and
 * deallocating the encryption context
 */
int f2fs_decrypt_one(struct inode *inode, struct page *page)
{
	struct f2fs_crypto_ctx *ctx = f2fs_get_crypto_ctx(inode);
	int ret;

	/* f2fs_get_crypto_ctx() returns an ERR_PTR on failure, never NULL */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = f2fs_decrypt(ctx, page);
	f2fs_release_crypto_ctx(ctx);
	return ret;
}

bool f2fs_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == F2FS_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * f2fs_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t f2fs_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == f2fs_encryption_key_size(mode))
		return size;
	return 0;
}
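
/*
 * Illustrative use only (hypothetical caller, not part of this file):
 * key setup would reject a policy whose contents mode is unknown or
 * whose master key payload does not have the size required by that mode.
 */
#if 0
static int example_check_key(uint32_t mode, uint32_t payload_size)
{
	if (!f2fs_valid_contents_enc_mode(mode))
		return -EINVAL;
	if (!f2fs_validate_encryption_key_size(mode, payload_size))
		return -EINVAL;
	return 0;
}
#endif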