crypto/algif_skcipher.c
/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

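/*
 * Illustrative user-space usage of the AF_ALG interface implemented here
 * (a minimal, untested sketch; error handling and includes omitted):
 *
 *        int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *        struct sockaddr_alg sa = {
 *                .salg_family = AF_ALG,
 *                .salg_type   = "skcipher",
 *                .salg_name   = "cbc(aes)",
 *        };
 *        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *        setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *        int opfd = accept(tfmfd, NULL, 0);
 *
 * The request socket (opfd) then receives ALG_SET_OP (ALG_OP_ENCRYPT or
 * ALG_OP_DECRYPT) and ALG_SET_IV as sendmsg() control data, the payload via
 * sendmsg()/sendpage(), and the result is read back with read()/recvmsg().
 */
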
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

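/*
 * One chunk of the transmit scatter/gather list.  Chunks are linked on
 * skcipher_ctx::tsgl; 'cur' counts the entries already in use, and the
 * final entry of each chunk is reserved for chaining to the next one.
 */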
struct skcipher_sg_list {
        struct list_head list;

        int cur;

        struct scatterlist sg[0];
};

struct skcipher_tfm {
        struct crypto_skcipher *skcipher;
        bool has_key;
};

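/*
 * Per-request-socket state: the queued transmit scatterlists (tsgl), the
 * receive scatterlist used by the synchronous path (rsgl), the IV, the
 * completion for synchronous operations, the count of in-flight async
 * requests, and bookkeeping for how many bytes are queued (used) and
 * whether more data is expected (more/merge) and which direction (enc).
 */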
struct skcipher_ctx {
        struct list_head tsgl;
        struct af_alg_sgl rsgl;

        void *iv;

        struct af_alg_completion completion;

        atomic_t inflight;
        size_t used;

        unsigned int len;
        bool more;
        bool merge;
        bool enc;

        struct skcipher_request req;
};

struct skcipher_async_rsgl {
        struct af_alg_sgl sgl;
        struct list_head list;
};

struct skcipher_async_req {
        struct kiocb *iocb;
        struct skcipher_async_rsgl first_sgl;
        struct list_head list;
        struct scatterlist *tsg;
        atomic_t *inflight;
        struct skcipher_request req;
};

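/*
 * Scatterlist entries per skcipher_sg_list chunk, chosen so that a chunk
 * plus one extra entry (used for chaining to the next chunk) fits within
 * 4096 bytes.
 */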
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
                      sizeof(struct scatterlist) - 1)

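/*
 * Release the receive scatterlists and the transmit pages that an async
 * request took over from the socket context.
 */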
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
        struct skcipher_async_rsgl *rsgl, *tmp;
        struct scatterlist *sgl;
        struct scatterlist *sg;
        int i, n;

        list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
                af_alg_free_sg(&rsgl->sgl);
                if (rsgl != &sreq->first_sgl)
                        kfree(rsgl);
        }
        sgl = sreq->tsg;
        n = sg_nents(sgl);
        for_each_sg(sgl, sg, n, i)
                put_page(sg_page(sg));

        kfree(sreq->tsg);
}

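/* Completion callback for asynchronous requests issued via recvmsg(). */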
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
        struct skcipher_async_req *sreq = req->data;
        struct kiocb *iocb = sreq->iocb;

        atomic_dec(sreq->inflight);
        skcipher_free_async_sgls(sreq);
        kzfree(sreq);
        iocb->ki_complete(iocb, err, err);
}

static inline int skcipher_sndbuf(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;

        return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
                          ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
        return PAGE_SIZE <= skcipher_sndbuf(sk);
}

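/*
 * Make sure the last chunk on ctx->tsgl has room for at least one more
 * scatterlist entry, allocating and chaining a new chunk if necessary.
 */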
static int skcipher_alloc_sgl(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg = NULL;

        sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
        if (!list_empty(&ctx->tsgl))
                sg = sgl->sg;

        if (!sg || sgl->cur >= MAX_SGL_ENTS) {
                sgl = sock_kmalloc(sk, sizeof(*sgl) +
                                       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
                                   GFP_KERNEL);
                if (!sgl)
                        return -ENOMEM;

                sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
                sgl->cur = 0;

                if (sg)
                        sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

                list_add_tail(&sgl->list, &ctx->tsgl);
        }

        return 0;
}

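/*
 * Consume 'used' bytes from the front of the transmit scatterlist,
 * dropping page references when 'put' is set and freeing chunks that
 * have been fully consumed.
 */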
static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        int i;

        while (!list_empty(&ctx->tsgl)) {
                sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
                                       list);
                sg = sgl->sg;

                for (i = 0; i < sgl->cur; i++) {
                        size_t plen = min_t(size_t, used, sg[i].length);

                        if (!sg_page(sg + i))
                                continue;

                        sg[i].length -= plen;
                        sg[i].offset += plen;

                        used -= plen;
                        ctx->used -= plen;

                        if (sg[i].length)
                                return;
                        if (put)
                                put_page(sg_page(sg + i));
                        sg_assign_page(sg + i, NULL);
                }

                list_del(&sgl->list);
                sock_kfree_s(sk, sgl,
                             sizeof(*sgl) + sizeof(sgl->sg[0]) *
                                            (MAX_SGL_ENTS + 1));
        }

        if (!ctx->used)
                ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;

        skcipher_pull_sgl(sk, ctx->used, 1);
}

static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
        long timeout;
        DEFINE_WAIT(wait);
        int err = -ERESTARTSYS;

        if (flags & MSG_DONTWAIT)
                return -EAGAIN;

        sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        for (;;) {
                if (signal_pending(current))
                        break;
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
                        err = 0;
                        break;
                }
        }
        finish_wait(sk_sleep(sk), &wait);

        return err;
}

static void skcipher_wmem_wakeup(struct sock *sk)
{
        struct socket_wq *wq;

        if (!skcipher_writable(sk))
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        long timeout;
        DEFINE_WAIT(wait);
        int err = -ERESTARTSYS;

        if (flags & MSG_DONTWAIT)
                return -EAGAIN;

        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

        for (;;) {
                if (signal_pending(current))
                        break;
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (sk_wait_event(sk, &timeout, ctx->used)) {
                        err = 0;
                        break;
                }
        }
        finish_wait(sk_sleep(sk), &wait);

        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

        return err;
}

static void skcipher_data_wakeup(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct socket_wq *wq;

        if (!ctx->used)
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}

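/*
 * sendmsg() queues data for a later cipher operation.  An optional control
 * message selects encrypt/decrypt and supplies the IV; the payload is
 * merged into the tail page when possible, otherwise copied into freshly
 * allocated pages on ctx->tsgl.
 */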
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
                            size_t size)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct sock *psk = ask->parent;
        struct alg_sock *pask = alg_sk(psk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_tfm *skc = pask->private;
        struct crypto_skcipher *tfm = skc->skcipher;
        unsigned ivsize = crypto_skcipher_ivsize(tfm);
        struct skcipher_sg_list *sgl;
        struct af_alg_control con = {};
        long copied = 0;
        bool enc = 0;
        bool init = 0;
        int err;
        int i;

        if (msg->msg_controllen) {
                err = af_alg_cmsg_send(msg, &con);
                if (err)
                        return err;

                init = 1;
                switch (con.op) {
                case ALG_OP_ENCRYPT:
                        enc = 1;
                        break;
                case ALG_OP_DECRYPT:
                        enc = 0;
                        break;
                default:
                        return -EINVAL;
                }

                if (con.iv && con.iv->ivlen != ivsize)
                        return -EINVAL;
        }

        err = -EINVAL;

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (init) {
                ctx->enc = enc;
                if (con.iv)
                        memcpy(ctx->iv, con.iv->iv, ivsize);
        }

        while (size) {
                struct scatterlist *sg;
                unsigned long len = size;
                size_t plen;

                if (ctx->merge) {
                        sgl = list_entry(ctx->tsgl.prev,
                                         struct skcipher_sg_list, list);
                        sg = sgl->sg + sgl->cur - 1;
                        len = min_t(unsigned long, len,
                                    PAGE_SIZE - sg->offset - sg->length);

                        err = memcpy_from_msg(page_address(sg_page(sg)) +
                                              sg->offset + sg->length,
                                              msg, len);
                        if (err)
                                goto unlock;

                        sg->length += len;
                        ctx->merge = (sg->offset + sg->length) &
                                     (PAGE_SIZE - 1);

                        ctx->used += len;
                        copied += len;
                        size -= len;
                        continue;
                }

                if (!skcipher_writable(sk)) {
                        err = skcipher_wait_for_wmem(sk, msg->msg_flags);
                        if (err)
                                goto unlock;
                }

                len = min_t(unsigned long, len, skcipher_sndbuf(sk));

                err = skcipher_alloc_sgl(sk);
                if (err)
                        goto unlock;

                sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
                sg = sgl->sg;
                if (sgl->cur)
                        sg_unmark_end(sg + sgl->cur - 1);
                do {
                        i = sgl->cur;
                        plen = min_t(size_t, len, PAGE_SIZE);

                        sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
                        err = -ENOMEM;
                        if (!sg_page(sg + i))
                                goto unlock;

                        err = memcpy_from_msg(page_address(sg_page(sg + i)),
                                              msg, plen);
                        if (err) {
                                __free_page(sg_page(sg + i));
                                sg_assign_page(sg + i, NULL);
                                goto unlock;
                        }

                        sg[i].length = plen;
                        len -= plen;
                        ctx->used += plen;
                        copied += plen;
                        size -= plen;
                        sgl->cur++;
                } while (len && sgl->cur < MAX_SGL_ENTS);

                if (!size)
                        sg_mark_end(sg + sgl->cur - 1);

                ctx->merge = plen & (PAGE_SIZE - 1);
        }

        err = 0;

        ctx->more = msg->msg_flags & MSG_MORE;

unlock:
        skcipher_data_wakeup(sk);
        release_sock(sk);

        return copied ?: err;
}

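/*
 * sendpage() is the zero-copy variant of sendmsg(): it takes a reference
 * on the caller's page and links it into the transmit scatterlist instead
 * of copying the data.
 */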
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
                                 int offset, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_sg_list *sgl;
        int err = -EINVAL;

        if (flags & MSG_SENDPAGE_NOTLAST)
                flags |= MSG_MORE;

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (!size)
                goto done;

        if (!skcipher_writable(sk)) {
                err = skcipher_wait_for_wmem(sk, flags);
                if (err)
                        goto unlock;
        }

        err = skcipher_alloc_sgl(sk);
        if (err)
                goto unlock;

        ctx->merge = 0;
        sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

        if (sgl->cur)
                sg_unmark_end(sgl->sg + sgl->cur - 1);

        sg_mark_end(sgl->sg + sgl->cur);
        get_page(page);
        sg_set_page(sgl->sg + sgl->cur, page, size, offset);
        sgl->cur++;
        ctx->used += size;

done:
        ctx->more = flags & MSG_MORE;

unlock:
        skcipher_data_wakeup(sk);
        release_sock(sk);

        return err ?: size;
}

static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        int nents = 0;

        list_for_each_entry(sgl, &ctx->tsgl, list) {
                sg = sgl->sg;

                while (!sg->length)
                        sg++;

                nents += sg_nents(sg);
        }
        return nents;
}

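/*
 * Asynchronous recvmsg(): the transmit pages are handed over to a
 * dedicated skcipher_async_req so the socket's own scatterlists can be
 * released before the cipher operation completes; the iocb is completed
 * from skcipher_async_cb().
 */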
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
                                  int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct sock *psk = ask->parent;
        struct alg_sock *pask = alg_sk(psk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_tfm *skc = pask->private;
        struct crypto_skcipher *tfm = skc->skcipher;
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        struct skcipher_async_req *sreq;
        struct skcipher_request *req;
        struct skcipher_async_rsgl *last_rsgl = NULL;
        unsigned int txbufs = 0, len = 0, tx_nents;
        unsigned int reqsize = crypto_skcipher_reqsize(tfm);
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        int err = -ENOMEM;
        bool mark = false;
        char *iv;

        sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
        if (unlikely(!sreq))
                goto out;

        req = &sreq->req;
        iv = (char *)(req + 1) + reqsize;
        sreq->iocb = msg->msg_iocb;
        INIT_LIST_HEAD(&sreq->list);
        sreq->inflight = &ctx->inflight;

        lock_sock(sk);
        tx_nents = skcipher_all_sg_nents(ctx);
        sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
        if (unlikely(!sreq->tsg))
                goto unlock;
        sg_init_table(sreq->tsg, tx_nents);
        memcpy(iv, ctx->iv, ivsize);
        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      skcipher_async_cb, sreq);

        while (iov_iter_count(&msg->msg_iter)) {
                struct skcipher_async_rsgl *rsgl;
                int used;

                if (!ctx->used) {
                        err = skcipher_wait_for_data(sk, flags);
                        if (err)
                                goto free;
                }
                sgl = list_first_entry(&ctx->tsgl,
                                       struct skcipher_sg_list, list);
                sg = sgl->sg;

                while (!sg->length)
                        sg++;

                used = min_t(unsigned long, ctx->used,
                             iov_iter_count(&msg->msg_iter));
                used = min_t(unsigned long, used, sg->length);

                if (txbufs == tx_nents) {
                        struct scatterlist *tmp;
                        int x;
                        /* Ran out of tx slots in async request
                         * need to expand */
                        tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
                                      GFP_KERNEL);
                        if (!tmp)
                                goto free;

                        sg_init_table(tmp, tx_nents * 2);
                        for (x = 0; x < tx_nents; x++)
                                sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
                                            sreq->tsg[x].length,
                                            sreq->tsg[x].offset);
                        kfree(sreq->tsg);
                        sreq->tsg = tmp;
                        tx_nents *= 2;
                        mark = true;
                }
                /* Need to take over the tx sgl from ctx
                 * to the asynch req - these sgls will be freed later */
                sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
                            sg->offset);

                if (list_empty(&sreq->list)) {
                        rsgl = &sreq->first_sgl;
                        list_add_tail(&rsgl->list, &sreq->list);
                } else {
                        rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
                        if (!rsgl) {
                                err = -ENOMEM;
                                goto free;
                        }
                        list_add_tail(&rsgl->list, &sreq->list);
                }

                used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
                err = used;
                if (used < 0)
                        goto free;
                if (last_rsgl)
                        af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

                last_rsgl = rsgl;
                len += used;
                skcipher_pull_sgl(sk, used, 0);
                iov_iter_advance(&msg->msg_iter, used);
        }

        if (mark)
                sg_mark_end(sreq->tsg + txbufs - 1);

        skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
                                   len, iv);
        err = ctx->enc ? crypto_skcipher_encrypt(req) :
                         crypto_skcipher_decrypt(req);
        if (err == -EINPROGRESS) {
                atomic_inc(&ctx->inflight);
                err = -EIOCBQUEUED;
                sreq = NULL;
                goto unlock;
        }
free:
        skcipher_free_async_sgls(sreq);
unlock:
        skcipher_wmem_wakeup(sk);
        release_sock(sk);
        kzfree(sreq);
out:
        return err;
}

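/*
 * Synchronous recvmsg(): process the queued data in place, waiting for
 * each cipher operation to complete before copying the result to user
 * space.  Reads are truncated to a multiple of the cipher block size
 * unless they consume the last of the queued data.
 */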
static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
                                 int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct sock *psk = ask->parent;
        struct alg_sock *pask = alg_sk(psk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_tfm *skc = pask->private;
        struct crypto_skcipher *tfm = skc->skcipher;
        unsigned bs = crypto_skcipher_blocksize(tfm);
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        int err = -EAGAIN;
        int used;
        long copied = 0;

        lock_sock(sk);
        while (msg_data_left(msg)) {
                if (!ctx->used) {
                        err = skcipher_wait_for_data(sk, flags);
                        if (err)
                                goto unlock;
                }

                used = min_t(unsigned long, ctx->used, msg_data_left(msg));

                used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
                err = used;
                if (err < 0)
                        goto unlock;

                if (ctx->more || used < ctx->used)
                        used -= used % bs;

                err = -EINVAL;
                if (!used)
                        goto free;

                sgl = list_first_entry(&ctx->tsgl,
                                       struct skcipher_sg_list, list);
                sg = sgl->sg;

                while (!sg->length)
                        sg++;

                skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
                                           ctx->iv);

                err = af_alg_wait_for_completion(
                                ctx->enc ?
                                        crypto_skcipher_encrypt(&ctx->req) :
                                        crypto_skcipher_decrypt(&ctx->req),
                                &ctx->completion);

free:
                af_alg_free_sg(&ctx->rsgl);

                if (err)
                        goto unlock;

                copied += used;
                skcipher_pull_sgl(sk, used, 1);
                iov_iter_advance(&msg->msg_iter, used);
        }

        err = 0;

unlock:
        skcipher_wmem_wakeup(sk);
        release_sock(sk);

        return copied ?: err;
}

static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
                            size_t ignored, int flags)
{
        return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
                skcipher_recvmsg_async(sock, msg, flags) :
                skcipher_recvmsg_sync(sock, msg, flags);
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
                                  poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        unsigned int mask;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        if (ctx->used)
                mask |= POLLIN | POLLRDNORM;

        if (skcipher_writable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}

static struct proto_ops algif_skcipher_ops = {
        .family = PF_ALG,

        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .getname = sock_no_getname,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .getsockopt = sock_no_getsockopt,
        .mmap = sock_no_mmap,
        .bind = sock_no_bind,
        .accept = sock_no_accept,
        .setsockopt = sock_no_setsockopt,

        .release = af_alg_release,
        .sendmsg = skcipher_sendmsg,
        .sendpage = skcipher_sendpage,
        .recvmsg = skcipher_recvmsg,
        .poll = skcipher_poll,
};

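/*
 * Used by the "nokey" entry points: only allow the operation to proceed
 * once a key has been set on the parent tfm socket, and mark this request
 * socket as keyed so later calls skip the check.
 */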
static int skcipher_check_key(struct socket *sock)
{
        int err = 0;
        struct sock *psk;
        struct alg_sock *pask;
        struct skcipher_tfm *tfm;
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);

        lock_sock(sk);
        if (ask->refcnt)
                goto unlock_child;

        psk = ask->parent;
        pask = alg_sk(ask->parent);
        tfm = pask->private;

        err = -ENOKEY;
        lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
        if (!tfm->has_key)
                goto unlock;

        if (!pask->refcnt++)
                sock_hold(psk);

        ask->refcnt = 1;
        sock_put(psk);

        err = 0;

unlock:
        release_sock(psk);
unlock_child:
        release_sock(sk);

        return err;
}

static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
                                  size_t size)
{
        int err;

        err = skcipher_check_key(sock);
        if (err)
                return err;

        return skcipher_sendmsg(sock, msg, size);
}

static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
                                       int offset, size_t size, int flags)
{
        int err;

        err = skcipher_check_key(sock);
        if (err)
                return err;

        return skcipher_sendpage(sock, page, offset, size, flags);
}

static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
                                  size_t ignored, int flags)
{
        int err;

        err = skcipher_check_key(sock);
        if (err)
                return err;

        return skcipher_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_skcipher_ops_nokey = {
        .family = PF_ALG,

        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .getname = sock_no_getname,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .getsockopt = sock_no_getsockopt,
        .mmap = sock_no_mmap,
        .bind = sock_no_bind,
        .accept = sock_no_accept,
        .setsockopt = sock_no_setsockopt,

        .release = af_alg_release,
        .sendmsg = skcipher_sendmsg_nokey,
        .sendpage = skcipher_sendpage_nokey,
        .recvmsg = skcipher_recvmsg_nokey,
        .poll = skcipher_poll,
};

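/* Allocate the skcipher transform backing a newly bound tfm socket. */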
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
        struct skcipher_tfm *tfm;
        struct crypto_skcipher *skcipher;

        tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
        if (!tfm)
                return ERR_PTR(-ENOMEM);

        skcipher = crypto_alloc_skcipher(name, type, mask);
        if (IS_ERR(skcipher)) {
                kfree(tfm);
                return ERR_CAST(skcipher);
        }

        tfm->skcipher = skcipher;

        return tfm;
}

static void skcipher_release(void *private)
{
        struct skcipher_tfm *tfm = private;

        crypto_free_skcipher(tfm->skcipher);
        kfree(tfm);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
        struct skcipher_tfm *tfm = private;
        int err;

        err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
        tfm->has_key = !err;

        return err;
}

static void skcipher_wait(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        int ctr = 0;

        while (atomic_read(&ctx->inflight) && ctr++ < 100)
                msleep(100);
}

static void skcipher_sock_destruct(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

        if (atomic_read(&ctx->inflight))
                skcipher_wait(sk);

        skcipher_free_sgl(sk);
        sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
        sock_kfree_s(sk, ctx, ctx->len);
        af_alg_release_parent(sk);
}

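/*
 * Set up the per-request-socket context when a tfm socket is accept()ed:
 * allocate the context with its embedded request, zero the IV and
 * initialise the synchronous completion callback.
 */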
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
        struct skcipher_ctx *ctx;
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_tfm *tfm = private;
        struct crypto_skcipher *skcipher = tfm->skcipher;
        unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);

        ctx = sock_kmalloc(sk, len, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
                               GFP_KERNEL);
        if (!ctx->iv) {
                sock_kfree_s(sk, ctx, len);
                return -ENOMEM;
        }

        memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

        INIT_LIST_HEAD(&ctx->tsgl);
        ctx->len = len;
        ctx->used = 0;
        ctx->more = 0;
        ctx->merge = 0;
        ctx->enc = 0;
        atomic_set(&ctx->inflight, 0);
        af_alg_init_completion(&ctx->completion);

        ask->private = ctx;

        skcipher_request_set_tfm(&ctx->req, skcipher);
        skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                                 CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      af_alg_complete, &ctx->completion);

        sk->sk_destruct = skcipher_sock_destruct;

        return 0;
}

static int skcipher_accept_parent(void *private, struct sock *sk)
{
        struct skcipher_tfm *tfm = private;

        if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
                return -ENOKEY;

        return skcipher_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_skcipher = {
        .bind = skcipher_bind,
        .release = skcipher_release,
        .setkey = skcipher_setkey,
        .accept = skcipher_accept_parent,
        .accept_nokey = skcipher_accept_parent_nokey,
        .ops = &algif_skcipher_ops,
        .ops_nokey = &algif_skcipher_ops_nokey,
        .name = "skcipher",
        .owner = THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
        return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
        int err = af_alg_unregister_type(&algif_type_skcipher);
        BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");