/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
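
/*
 * Example user-space usage (an illustrative sketch only, not part of this
 * module): encrypting with AES-128 in CBC mode through the "skcipher"
 * AF_ALG interface.  Error handling is omitted.
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "skcipher",
 *		.salg_name   = "cbc(aes)",
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 * The operation (ALG_OP_ENCRYPT/ALG_OP_DECRYPT) and the IV are then passed
 * as ALG_SET_OP and ALG_SET_IV control messages on a sendmsg() call that
 * carries the plaintext, and the result is read back with read() or
 * recvmsg() on opfd.
 */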

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

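/*
 * One bundle of transmit scatterlist entries.  Bundles are chained together
 * on skcipher_ctx::tsgl as sendmsg()/sendpage() queue more data.
 */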
struct skcipher_sg_list {
	struct list_head list;

	int cur;

	struct scatterlist sg[0];
};

struct skcipher_ctx {
	struct list_head tsgl;
	struct af_alg_sgl rsgl;

	void *iv;

	struct af_alg_completion completion;

	atomic_t inflight;
	unsigned used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	struct ablkcipher_request req;
};

struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct skcipher_async_req {
	struct kiocb *iocb;
	struct skcipher_async_rsgl first_sgl;
	struct list_head list;
	struct scatterlist *tsg;
	char iv[];
};

#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))

#define GET_REQ_SIZE(ctx) \
	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))

#define GET_IV_SIZE(ctx) \
	crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))

#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)

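/*
 * Release the receive-side sgls attached to an async request and drop the
 * page references held by its private copy of the transmit scatterlist.
 */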
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &sreq->first_sgl)
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	for_each_sg(sgl, sg, n, i)
		put_page(sg_page(sg));

	kfree(sreq->tsg);
}

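/*
 * Completion callback for async requests: tear the request down and report
 * the result through the originating iocb.
 */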
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct sock *sk = req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(&ctx->inflight);
	skcipher_free_async_sgls(sreq);
	kfree(req);
	iocb->ki_complete(iocb, err, err);
}

static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}

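/*
 * Make sure the last bundle on ctx->tsgl has a free slot, allocating and
 * chaining a fresh bundle if it does not.
 */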
static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}

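/*
 * Consume the given number of bytes from the front of the transmit queue,
 * dropping the page references as pages are drained when "put" is set.
 */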
static void skcipher_pull_sgl(struct sock *sk, int used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			int plen = min_t(int, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used, 1);
}

static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	return err;
}

static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT) {
		return -EAGAIN;
	}

	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	return err;
}

static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

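/*
 * Queue data for encryption/decryption.  The operation and IV arrive as
 * ancillary data; the payload is copied into freshly allocated pages on
 * ctx->tsgl, merging into the tail page where possible.
 */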
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		int plen;

		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		sg_unmark_end(sg + sgl->cur);
		do {
			i = sgl->cur;
			plen = min_t(int, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

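/*
 * Zero-copy variant of skcipher_sendmsg(): take a reference on the caller's
 * page and link it into the transmit queue directly.
 */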
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int nents = 0;

	list_for_each_entry(sgl, &ctx->tsgl, list) {
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		nents += sg_nents(sg);
	}
	return nents;
}

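/*
 * Asynchronous read path: move the queued transmit pages into a
 * self-contained request, map the user buffers as the receive sgl and
 * start the cipher.  Completion is signalled through the iocb from
 * skcipher_async_cb().
 */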
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct ablkcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
	unsigned int reqlen = sizeof(struct skcipher_async_req) +
				GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
	int err = -ENOMEM;
	bool mark = false;

	lock_sock(sk);
	req = kmalloc(reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	sreq = GET_SREQ(req, ctx);
	sreq->iocb = msg->msg_iocb;
	memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
	INIT_LIST_HEAD(&sreq->list);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(!sreq->tsg)) {
		kfree(req);
		goto unlock;
	}
	sg_init_table(sreq->tsg, tx_nents);
	memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
	ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					skcipher_async_cb, sk);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;
			/* Ran out of tx slots in async request
			 * need to expand */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp)
				goto free;

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
			mark = true;
		}
		/* Need to take over the tx sgl from ctx
		 * to the asynch req - these sgls will be freed later */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	if (mark)
		sg_mark_end(sreq->tsg + txbufs - 1);

	ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				     len, sreq->iv);
	err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
			 crypto_ablkcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
	kfree(req);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	return err;
}

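/*
 * Synchronous read path: process the transmit queue in place, mapping one
 * chunk of user memory at a time and waiting for each cipher operation to
 * complete before advancing.
 */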
static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
		&ctx->req));
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used, msg_data_left(msg));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		ablkcipher_request_set_crypt(&ctx->req, sg,
					     ctx->rsgl.sg, used,
					     ctx->iv);

		err = af_alg_wait_for_completion(
				ctx->enc ?
					crypto_ablkcipher_encrypt(&ctx->req) :
					crypto_ablkcipher_decrypt(&ctx->req),
				&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		skcipher_recvmsg_async(sock, msg, flags) :
		skcipher_recvmsg_sync(sock, msg, flags);
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_ablkcipher(name, type, mask);
}

static void skcipher_release(void *private)
{
	crypto_free_ablkcipher(private);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_ablkcipher_setkey(private, key, keylen);
}

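/*
 * Wait (for at most about ten seconds) for outstanding async requests to
 * drain before the socket is torn down.
 */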
static void skcipher_wait(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	int ctr = 0;

	while (atomic_read(&ctx->inflight) && ctr++ < 100)
		msleep(100);
}

static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);

	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

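/*
 * Set up the per-socket context when an operation socket is accept()ed
 * from the transform socket.
 */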
static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	ablkcipher_request_set_tfm(&ctx->req, private);
	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.ops		=	&algif_skcipher_ops,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);
	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");