crypto/ahash.c
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
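
/*
 * Typical use of this interface, as a minimal sketch (illustrative only,
 * not part of this file; error handling is trimmed, and the "sha256"
 * name, data/len buffer, and my_complete/my_ctx callback pair are
 * assumptions of the example, not taken from this file):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 digest[32];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_complete, my_ctx);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_ahash_digest(req);  // may return -EINPROGRESS
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */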

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

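/*
 * Map the current scatterlist page and return how many bytes the caller
 * may hash from it.  The first chunk on a page is trimmed so that any
 * following chunk starts on an alignmask boundary.
 */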
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

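/*
 * Finish the current chunk and advance the walk.  A positive return value
 * is the number of bytes now available at walk->data; zero means the walk
 * is complete; a negative value propagates the caller's error.
 */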
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may-sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

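/*
 * Like crypto_hash_walk_first(), but flag the walk CRYPTO_ALG_ASYNC so
 * hash_walk_next() maps pages with kmap() instead of kmap_atomic(),
 * allowing the caller to sleep while a chunk is mapped.
 */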
int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

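/*
 * Copy an unaligned key into a freshly allocated bounce buffer whose
 * start is rounded up to the transform's alignment mask, then set the
 * key from that aligned copy.  The buffer is zeroed before being freed.
 */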
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

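/*
 * Divert an ahash request into a private, aligned result buffer.  The
 * original completion callback, context and result pointers are stashed
 * in a newly allocated ahash_request_priv and put back by
 * ahash_restore_req() once the operation finishes.
 */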
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *     .result   = ORIGINAL(result)
	 *     .complete = ORIGINAL(base.complete)
	 *     .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 * is for internal use of the Crypto API and the
	 * user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;

	/* Restore the original crypto request. */
	req->result = priv->result;
	req->base.complete = priv->complete;
	req->base.data = priv->data;
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_op_unaligned_finish(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

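/*
 * Dispatch an ahash operation.  If the caller's result buffer violates
 * the algorithm's alignment mask, route the request through the
 * save/restore machinery above so the driver sees an aligned buffer.
 */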
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

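/*
 * Default finup implementation for drivers that only provide ->update
 * and ->final: run update, then final, restoring the original request
 * on the way out.  The *_done callbacks handle asynchronous completion,
 * the *_finish helpers the synchronous path.
 */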
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	ahash_def_finup_finish2(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	err = ahash_def_finup_finish1(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

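/*
 * Set up a freshly allocated ahash transform.  Optional callbacks are
 * stubbed with -ENOSYS handlers first, then overridden with whatever
 * the algorithm actually provides; shash-backed algorithms are instead
 * wrapped via crypto_init_shash_ops_async().
 */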
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

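/*
 * Sanity-check an algorithm before registration (digest and state sizes
 * must be non-zero and bounded by PAGE_SIZE / 8) and force its type bits
 * to CRYPTO_ALG_TYPE_AHASH.
 */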
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");