/*
 * net/ceph/crypto.c -- ceph crypto key handling and AES-CBC helpers
 */
1
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/err.h>
5 #include <linux/scatterlist.h>
6 #include <linux/slab.h>
7 #include <crypto/hash.h>
8 #include <linux/key-type.h>
9
10 #include <keys/ceph-type.h>
11 #include <keys/user-type.h>
12 #include <linux/ceph/decode.h>
13 #include "crypto.h"
14
15 int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
16 const struct ceph_crypto_key *src)
17 {
18 memcpy(dst, src, sizeof(struct ceph_crypto_key));
19 dst->key = kmemdup(src->key, src->len, GFP_NOFS);
20 if (!dst->key)
21 return -ENOMEM;
22 return 0;
23 }
24
25 int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
26 {
27 if (*p + sizeof(u16) + sizeof(key->created) +
28 sizeof(u16) + key->len > end)
29 return -ERANGE;
30 ceph_encode_16(p, key->type);
31 ceph_encode_copy(p, &key->created, sizeof(key->created));
32 ceph_encode_16(p, key->len);
33 ceph_encode_copy(p, key->key, key->len);
34 return 0;
35 }
36
/*
 * Parse a ceph_crypto_key from the buffer at *p (bounded by @end),
 * advancing *p.  Allocates key->key; the caller owns and must free it
 * (via ceph_crypto_key_destroy).  Returns 0 on success, -EINVAL on a
 * short or malformed buffer, -ENOMEM on allocation failure.
 */
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
	/* fixed-size header: type (u16) + created timestamp + len (u16) */
	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
	key->type = ceph_decode_16(p);
	ceph_decode_copy(p, &key->created, sizeof(key->created));
	key->len = ceph_decode_16(p);
	/* the variable-size key payload must also fit in the buffer */
	ceph_decode_need(p, end, key->len, bad);
	key->key = kmalloc(key->len, GFP_NOFS);
	if (!key->key)
		return -ENOMEM;
	ceph_decode_copy(p, key->key, key->len);
	return 0;

bad:
	dout("failed to decode crypto key\n");
	return -EINVAL;
}
54
55 int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
56 {
57 int inlen = strlen(inkey);
58 int blen = inlen * 3 / 4;
59 void *buf, *p;
60 int ret;
61
62 dout("crypto_key_unarmor %s\n", inkey);
63 buf = kmalloc(blen, GFP_NOFS);
64 if (!buf)
65 return -ENOMEM;
66 blen = ceph_unarmor(buf, inkey, inkey+inlen);
67 if (blen < 0) {
68 kfree(buf);
69 return blen;
70 }
71
72 p = buf;
73 ret = ceph_crypto_key_decode(key, &p, p + blen);
74 kfree(buf);
75 if (ret)
76 return ret;
77 dout("crypto_key_unarmor key %p type %d len %d\n", key,
78 key->type, key->len);
79 return 0;
80 }
81
82
83
#define AES_KEY_SIZE 16

/* Allocate a synchronous AES-CBC transform for en/decryption. */
static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
{
	return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
}

/* all AES-CBC operations below use this fixed, well-known IV */
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
92
/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient. No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
			 const void *buf, unsigned int buf_len)
{
	struct scatterlist *sg;
	const bool is_vmalloc = is_vmalloc_addr(buf);
	unsigned int off = offset_in_page(buf);
	unsigned int chunk_cnt = 1;
	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
	int i;
	int ret;

	/* an empty buffer cannot be mapped; hand back a zeroed table */
	if (buf_len == 0) {
		memset(sgt, 0, sizeof(*sgt));
		return -EINVAL;
	}

	/*
	 * vmalloc memory is only virtually contiguous, so it must be
	 * mapped one page per sg entry; kmalloc memory is physically
	 * contiguous and fits in a single entry.
	 */
	if (is_vmalloc) {
		chunk_cnt = chunk_len >> PAGE_SHIFT;
		chunk_len = PAGE_SIZE;
	}

	if (chunk_cnt > 1) {
		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
		if (ret)
			return ret;
	} else {
		/* single chunk: use the caller's preallocated sg */
		WARN_ON(chunk_cnt != 1);
		sg_init_table(prealloc_sg, 1);
		sgt->sgl = prealloc_sg;
		sgt->nents = sgt->orig_nents = 1;
	}

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
		struct page *page;
		unsigned int len = min(chunk_len - off, buf_len);

		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		sg_set_page(sg, page, len, off);

		/* only the first chunk can start mid-page */
		off = 0;
		buf += len;
		buf_len -= len;
	}
	WARN_ON(buf_len != 0);

	return 0;
}
156
/*
 * Release a table built by setup_sgtable().  A single-entry table
 * used the caller's preallocated sg and needs no freeing.
 */
static void teardown_sgtable(struct sg_table *sgt)
{
	if (sgt->orig_nents > 1)
		sg_free_table(sgt);
}
162
163 static int ceph_aes_encrypt(const void *key, int key_len,
164 void *dst, size_t *dst_len,
165 const void *src, size_t src_len)
166 {
167 struct scatterlist sg_in[2], prealloc_sg;
168 struct sg_table sg_out;
169 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
170 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
171 int ret;
172 void *iv;
173 int ivsize;
174 size_t zero_padding = (0x10 - (src_len & 0x0f));
175 char pad[16];
176
177 if (IS_ERR(tfm))
178 return PTR_ERR(tfm);
179
180 memset(pad, zero_padding, zero_padding);
181
182 *dst_len = src_len + zero_padding;
183
184 sg_init_table(sg_in, 2);
185 sg_set_buf(&sg_in[0], src, src_len);
186 sg_set_buf(&sg_in[1], pad, zero_padding);
187 ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
188 if (ret)
189 goto out_tfm;
190
191 crypto_blkcipher_setkey((void *)tfm, key, key_len);
192 iv = crypto_blkcipher_crt(tfm)->iv;
193 ivsize = crypto_blkcipher_ivsize(tfm);
194 memcpy(iv, aes_iv, ivsize);
195
196 /*
197 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
198 key, key_len, 1);
199 print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
200 src, src_len, 1);
201 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
202 pad, zero_padding, 1);
203 */
204 ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
205 src_len + zero_padding);
206 if (ret < 0) {
207 pr_err("ceph_aes_crypt failed %d\n", ret);
208 goto out_sg;
209 }
210 /*
211 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
212 dst, *dst_len, 1);
213 */
214
215 out_sg:
216 teardown_sgtable(&sg_out);
217 out_tfm:
218 crypto_free_blkcipher(tfm);
219 return ret;
220 }
221
222 static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
223 size_t *dst_len,
224 const void *src1, size_t src1_len,
225 const void *src2, size_t src2_len)
226 {
227 struct scatterlist sg_in[3], prealloc_sg;
228 struct sg_table sg_out;
229 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
230 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
231 int ret;
232 void *iv;
233 int ivsize;
234 size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
235 char pad[16];
236
237 if (IS_ERR(tfm))
238 return PTR_ERR(tfm);
239
240 memset(pad, zero_padding, zero_padding);
241
242 *dst_len = src1_len + src2_len + zero_padding;
243
244 sg_init_table(sg_in, 3);
245 sg_set_buf(&sg_in[0], src1, src1_len);
246 sg_set_buf(&sg_in[1], src2, src2_len);
247 sg_set_buf(&sg_in[2], pad, zero_padding);
248 ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
249 if (ret)
250 goto out_tfm;
251
252 crypto_blkcipher_setkey((void *)tfm, key, key_len);
253 iv = crypto_blkcipher_crt(tfm)->iv;
254 ivsize = crypto_blkcipher_ivsize(tfm);
255 memcpy(iv, aes_iv, ivsize);
256
257 /*
258 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
259 key, key_len, 1);
260 print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
261 src1, src1_len, 1);
262 print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
263 src2, src2_len, 1);
264 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
265 pad, zero_padding, 1);
266 */
267 ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
268 src1_len + src2_len + zero_padding);
269 if (ret < 0) {
270 pr_err("ceph_aes_crypt2 failed %d\n", ret);
271 goto out_sg;
272 }
273 /*
274 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
275 dst, *dst_len, 1);
276 */
277
278 out_sg:
279 teardown_sgtable(&sg_out);
280 out_tfm:
281 crypto_free_blkcipher(tfm);
282 return ret;
283 }
284
285 static int ceph_aes_decrypt(const void *key, int key_len,
286 void *dst, size_t *dst_len,
287 const void *src, size_t src_len)
288 {
289 struct sg_table sg_in;
290 struct scatterlist sg_out[2], prealloc_sg;
291 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
292 struct blkcipher_desc desc = { .tfm = tfm };
293 char pad[16];
294 void *iv;
295 int ivsize;
296 int ret;
297 int last_byte;
298
299 if (IS_ERR(tfm))
300 return PTR_ERR(tfm);
301
302 sg_init_table(sg_out, 2);
303 sg_set_buf(&sg_out[0], dst, *dst_len);
304 sg_set_buf(&sg_out[1], pad, sizeof(pad));
305 ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
306 if (ret)
307 goto out_tfm;
308
309 crypto_blkcipher_setkey((void *)tfm, key, key_len);
310 iv = crypto_blkcipher_crt(tfm)->iv;
311 ivsize = crypto_blkcipher_ivsize(tfm);
312 memcpy(iv, aes_iv, ivsize);
313
314 /*
315 print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
316 key, key_len, 1);
317 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
318 src, src_len, 1);
319 */
320 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
321 if (ret < 0) {
322 pr_err("ceph_aes_decrypt failed %d\n", ret);
323 goto out_sg;
324 }
325
326 if (src_len <= *dst_len)
327 last_byte = ((char *)dst)[src_len - 1];
328 else
329 last_byte = pad[src_len - *dst_len - 1];
330 if (last_byte <= 16 && src_len >= last_byte) {
331 *dst_len = src_len - last_byte;
332 } else {
333 pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
334 last_byte, (int)src_len);
335 return -EPERM; /* bad padding */
336 }
337 /*
338 print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
339 dst, *dst_len, 1);
340 */
341
342 out_sg:
343 teardown_sgtable(&sg_in);
344 out_tfm:
345 crypto_free_blkcipher(tfm);
346 return ret;
347 }
348
349 static int ceph_aes_decrypt2(const void *key, int key_len,
350 void *dst1, size_t *dst1_len,
351 void *dst2, size_t *dst2_len,
352 const void *src, size_t src_len)
353 {
354 struct sg_table sg_in;
355 struct scatterlist sg_out[3], prealloc_sg;
356 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
357 struct blkcipher_desc desc = { .tfm = tfm };
358 char pad[16];
359 void *iv;
360 int ivsize;
361 int ret;
362 int last_byte;
363
364 if (IS_ERR(tfm))
365 return PTR_ERR(tfm);
366
367 sg_init_table(sg_out, 3);
368 sg_set_buf(&sg_out[0], dst1, *dst1_len);
369 sg_set_buf(&sg_out[1], dst2, *dst2_len);
370 sg_set_buf(&sg_out[2], pad, sizeof(pad));
371 ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
372 if (ret)
373 goto out_tfm;
374
375 crypto_blkcipher_setkey((void *)tfm, key, key_len);
376 iv = crypto_blkcipher_crt(tfm)->iv;
377 ivsize = crypto_blkcipher_ivsize(tfm);
378 memcpy(iv, aes_iv, ivsize);
379
380 /*
381 print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
382 key, key_len, 1);
383 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
384 src, src_len, 1);
385 */
386 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
387 if (ret < 0) {
388 pr_err("ceph_aes_decrypt failed %d\n", ret);
389 goto out_sg;
390 }
391
392 if (src_len <= *dst1_len)
393 last_byte = ((char *)dst1)[src_len - 1];
394 else if (src_len <= *dst1_len + *dst2_len)
395 last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
396 else
397 last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
398 if (last_byte <= 16 && src_len >= last_byte) {
399 src_len -= last_byte;
400 } else {
401 pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
402 last_byte, (int)src_len);
403 return -EPERM; /* bad padding */
404 }
405
406 if (src_len < *dst1_len) {
407 *dst1_len = src_len;
408 *dst2_len = 0;
409 } else {
410 *dst2_len = src_len - *dst1_len;
411 }
412 /*
413 print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1,
414 dst1, *dst1_len, 1);
415 print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1,
416 dst2, *dst2_len, 1);
417 */
418
419 out_sg:
420 teardown_sgtable(&sg_in);
421 out_tfm:
422 crypto_free_blkcipher(tfm);
423 return ret;
424 }
425
426
427 int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
428 const void *src, size_t src_len)
429 {
430 switch (secret->type) {
431 case CEPH_CRYPTO_NONE:
432 if (*dst_len < src_len)
433 return -ERANGE;
434 memcpy(dst, src, src_len);
435 *dst_len = src_len;
436 return 0;
437
438 case CEPH_CRYPTO_AES:
439 return ceph_aes_decrypt(secret->key, secret->len, dst,
440 dst_len, src, src_len);
441
442 default:
443 return -EINVAL;
444 }
445 }
446
447 int ceph_decrypt2(struct ceph_crypto_key *secret,
448 void *dst1, size_t *dst1_len,
449 void *dst2, size_t *dst2_len,
450 const void *src, size_t src_len)
451 {
452 size_t t;
453
454 switch (secret->type) {
455 case CEPH_CRYPTO_NONE:
456 if (*dst1_len + *dst2_len < src_len)
457 return -ERANGE;
458 t = min(*dst1_len, src_len);
459 memcpy(dst1, src, t);
460 *dst1_len = t;
461 src += t;
462 src_len -= t;
463 if (src_len) {
464 t = min(*dst2_len, src_len);
465 memcpy(dst2, src, t);
466 *dst2_len = t;
467 }
468 return 0;
469
470 case CEPH_CRYPTO_AES:
471 return ceph_aes_decrypt2(secret->key, secret->len,
472 dst1, dst1_len, dst2, dst2_len,
473 src, src_len);
474
475 default:
476 return -EINVAL;
477 }
478 }
479
480 int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
481 const void *src, size_t src_len)
482 {
483 switch (secret->type) {
484 case CEPH_CRYPTO_NONE:
485 if (*dst_len < src_len)
486 return -ERANGE;
487 memcpy(dst, src, src_len);
488 *dst_len = src_len;
489 return 0;
490
491 case CEPH_CRYPTO_AES:
492 return ceph_aes_encrypt(secret->key, secret->len, dst,
493 dst_len, src, src_len);
494
495 default:
496 return -EINVAL;
497 }
498 }
499
500 int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
501 const void *src1, size_t src1_len,
502 const void *src2, size_t src2_len)
503 {
504 switch (secret->type) {
505 case CEPH_CRYPTO_NONE:
506 if (*dst_len < src1_len + src2_len)
507 return -ERANGE;
508 memcpy(dst, src1, src1_len);
509 memcpy(dst + src1_len, src2, src2_len);
510 *dst_len = src1_len + src2_len;
511 return 0;
512
513 case CEPH_CRYPTO_AES:
514 return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
515 src1, src1_len, src2, src2_len);
516
517 default:
518 return -EINVAL;
519 }
520 }
521
522 static int ceph_key_preparse(struct key_preparsed_payload *prep)
523 {
524 struct ceph_crypto_key *ckey;
525 size_t datalen = prep->datalen;
526 int ret;
527 void *p;
528
529 ret = -EINVAL;
530 if (datalen <= 0 || datalen > 32767 || !prep->data)
531 goto err;
532
533 ret = -ENOMEM;
534 ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
535 if (!ckey)
536 goto err;
537
538 /* TODO ceph_crypto_key_decode should really take const input */
539 p = (void *)prep->data;
540 ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
541 if (ret < 0)
542 goto err_ckey;
543
544 prep->payload[0] = ckey;
545 prep->quotalen = datalen;
546 return 0;
547
548 err_ckey:
549 kfree(ckey);
550 err:
551 return ret;
552 }
553
/* Undo ceph_key_preparse() for a key that was never instantiated. */
static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
{
	struct ceph_crypto_key *ckey = prep->payload[0];
	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}
560
/* Release the crypto key attached to a "ceph" keyring key. */
static void ceph_key_destroy(struct key *key)
{
	struct ceph_crypto_key *ckey = key->payload.data;

	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}
568
/*
 * "ceph" keyring key type: the payload is an encoded ceph_crypto_key,
 * decoded at preparse time and stored in payload[0].
 */
struct key_type key_type_ceph = {
	.name = "ceph",
	.preparse = ceph_key_preparse,
	.free_preparse = ceph_key_free_preparse,
	.instantiate = generic_key_instantiate,
	.destroy = ceph_key_destroy,
};
576
/* Register the "ceph" key type with the kernel keyring subsystem. */
int ceph_crypto_init(void) {
	return register_key_type(&key_type_ceph);
}

/* Unregister the "ceph" key type on teardown. */
void ceph_crypto_shutdown(void) {
	unregister_key_type(&key_type_ceph);
}