/*
 * Multi buffer SHA512 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Megha Dey <megha.dey@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
#include "sha512_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */

static struct mcryptd_alg_state sha512_mb_alg_state;

struct sha512_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};

static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx)
{
	struct ahash_request *areq;

	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}

static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}
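
/*
 * The two casts above rely on the mcryptd request layout: the
 * sha512_hash_ctx lives in the __ctx[] area of an inner ahash_request,
 * which is itself the areq member of a mcryptd_hash_request_ctx. The
 * reqsize set up in the *_init_tfm helpers below preserves this layout.
 */
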
static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct ahash_request *areq)
{
	rctx->flag = HASH_UPDATE;
}

static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state);
static asmlinkage struct job_sha512* (*sha512_job_mgr_submit)
					(struct sha512_mb_mgr *state,
					 struct job_sha512 *job);
static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
					(struct sha512_mb_mgr *state);
static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
					(struct sha512_mb_mgr *state);
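
/*
 * These job manager entry points are bound to the AVX2 assembly
 * implementations (sha512_mb_mgr_*_avx2) in sha512_mb_mod_init() below.
 */
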
inline void sha512_init_digest(uint64_t *digest)
{
	static const uint64_t initial_digest[SHA512_DIGEST_LENGTH] = {
					SHA512_H0, SHA512_H1, SHA512_H2,
					SHA512_H3, SHA512_H4, SHA512_H5,
					SHA512_H6, SHA512_H7 };
	memcpy(digest, initial_digest, sizeof(initial_digest));
}

inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
			   uint64_t total_len)
{
	uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA512_BLOCK_SIZE);
	padblock[i] = 0x80;

	/*
	 * Move i to the end of either the 1st or 2nd extra block,
	 * depending on how much padding is needed.
	 */
	i += ((SHA512_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA512_PADLENGTHFIELD_SIZE;

#if SHA512_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA512_LOG2_BLOCK_SIZE;
}
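
/*
 * Worked example for sha512_pad(): with total_len = 130, i starts at
 * 130 & 127 = 2, so the 0x80 byte lands at offset 2. The advance term
 * is (127 & -(130 + 16 + 1)) + 1 + 16 = 109 + 17, leaving i = 128: the
 * 128-bit length field occupies bytes 112..127, the bit count
 * 130 * 8 = 1040 is stored big-endian in the last eight bytes, and the
 * function returns 128 >> 7 = 1 extra block to hash.
 */
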
static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
		(struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA512_BLOCK_SIZE - 1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA512_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA512_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha512_hash_ctx *)
					sha512_job_mgr_submit(&mgr->mgr,
							      &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {
			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks =
					sha512_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}
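
/*
 * Note that sha512_ctx_mgr_resubmit() loops because each call to
 * sha512_job_mgr_submit() may hand back a different, previously queued
 * job whose lane just completed; that job's context is then driven
 * forward in turn until some context either completes (and is returned)
 * or stalls waiting for more user data (and is marked IDLE).
 */
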
static struct sha512_hash_ctx
		*sha512_ctx_mgr_get_comp_ctx(struct sha512_ctx_mgr *mgr)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return to
	 * the user. If it is not ready, resubmit the job to finish
	 * processing.
	 * If sha512_ctx_mgr_resubmit returned a job, it is ready to be
	 * returned. Otherwise, all jobs currently being managed by the
	 * hash_ctx_mgr still need processing.
	 */
	struct sha512_hash_ctx *ctx;

	ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_get_comp_job(&mgr->mgr);
	return sha512_ctx_mgr_resubmit(mgr, ctx);
}

static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
{
	sha512_job_mgr_init(&mgr->mgr);
}

static struct sha512_hash_ctx
		*sha512_ctx_mgr_submit(struct sha512_ctx_mgr *mgr,
				       struct sha512_hash_ctx *ctx,
				       const void *buffer,
				       uint32_t len,
				       int flags)
{
	if (flags & (~HASH_ENTIRE)) {
		/*
		 * User should not pass anything other than FIRST, UPDATE, or
		 * LAST
		 */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	if (flags & HASH_FIRST) {
		/* Init digest */
		sha512_init_digest(ctx->job.result_digest);

		/* Reset byte counter */
		ctx->total_length = 0;

		/* Clear extra blocks */
		ctx->partial_block_buffer_length = 0;
	}

	/*
	 * If we made it here, there were no errors during this call to
	 * submit
	 */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/*
	 * Store the user's request flags and mark this ctx as currently being
	 * processed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) {
		/*
		 * Compute how many bytes to copy from the user buffer into
		 * the extra block.
		 */
		uint32_t copy_len = SHA512_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[
					ctx->partial_block_buffer_length],
			       buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *) buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/*
		 * The extra block should never contain more than 1 block
		 * here.
		 */
		assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE);

		/*
		 * If the extra block buffer contains exactly 1 block, it can
		 * be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	return sha512_ctx_mgr_resubmit(mgr, ctx);
}
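
/*
 * Flag lifecycle for a streaming hash (flag values are defined in
 * sha512_mb_ctx.h): the first submit passes HASH_FIRST to init the
 * digest and reset the counters, intermediate submits pass HASH_UPDATE,
 * and the final submit passes HASH_LAST so that resubmit pads and
 * hashes the trailing extra block(s); HASH_ENTIRE is the mask of all
 * valid flag bits, used above to reject anything else.
 */
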
static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct sha512_ctx_mgr *mgr)
{
	struct sha512_hash_ctx *ctx;

	while (1) {
		ctx = (struct sha512_hash_ctx *)
					sha512_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			return NULL;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha512_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha512_ctx_mgr_resubmit returned a job, it is ready to
		 * be returned. Otherwise, all jobs currently being managed by
		 * the sha512_ctx_mgr still need processing. Loop.
		 */
		if (ctx)
			return ctx;
	}
}

static int sha512_mb_init(struct ahash_request *areq)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	hash_ctx_init(sctx);
	sctx->job.result_digest[0] = SHA512_H0;
	sctx->job.result_digest[1] = SHA512_H1;
	sctx->job.result_digest[2] = SHA512_H2;
	sctx->job.result_digest[3] = SHA512_H3;
	sctx->job.result_digest[4] = SHA512_H4;
	sctx->job.result_digest[5] = SHA512_H5;
	sctx->job.result_digest[6] = SHA512_H6;
	sctx->job.result_digest[7] = SHA512_H7;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}

static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int	i;
	struct	sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
	__be64	*dst = (__be64 *) rctx->out;

	for (i = 0; i < 8; ++i)
		dst[i] = cpu_to_be64(sctx->job.result_digest[i]);

	return 0;
}

static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			   struct mcryptd_alg_cstate *cstate, bool flush)
{
	int	flag = HASH_UPDATE;
	int	nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha512_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}
		sha_ctx = (struct sha512_hash_ctx *)
					ahash_request_ctx(&rctx->areq);
		kernel_fpu_begin();
		sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx,
						rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha512_ctx_mgr_flush(cstate->mgr);
		}
		kernel_fpu_end();
		if (sha_ctx)
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha512_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}

static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;

	/* remove from work list */
	spin_lock(&cstate->work_lock);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				req_ctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
	}

	return 0;
}

static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
			       struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock(&cstate->work_lock);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);

	mcryptd_arm_flusher(cstate, delay);
}
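
/*
 * Each queued request is stamped with an expiry FLUSH_INTERVAL
 * microseconds after arrival; if later submissions have not already
 * completed it by then, sha512_mb_flusher() below forces the partially
 * filled job manager lanes to make progress.
 */
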
static int sha512_mb_update(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
				     areq);
	struct mcryptd_alg_cstate *cstate =
			this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	sha512_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
					nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha512_mb_finup(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
				     areq);
	struct mcryptd_alg_cstate *cstate =
			this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	sha512_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
					nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha512_mb_final(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
				     areq);
	struct mcryptd_alg_cstate *cstate =
			this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct sha512_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	/* flag HASH_FINAL and 0 data size */
	sha512_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
					HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha512_mb_export(struct ahash_request *areq, void *out)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha512_mb_import(struct ahash_request *areq, const void *in)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}
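
/*
 * export/import copy the entire sha512_hash_ctx, which is why both
 * algorithm definitions below set .statesize to
 * sizeof(struct sha512_hash_ctx).
 */
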
static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb",
					  CRYPTO_ALG_INTERNAL,
					  CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha512_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}

static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 sizeof(struct sha512_hash_ctx));

	return 0;
}

static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static struct ahash_alg sha512_mb_areq_alg = {
	.init		=	sha512_mb_init,
	.update		=	sha512_mb_update,
	.final		=	sha512_mb_final,
	.finup		=	sha512_mb_finup,
	.export		=	sha512_mb_export,
	.import		=	sha512_mb_import,
	.halg		=	{
		.digestsize	=	SHA512_DIGEST_SIZE,
		.statesize	=	sizeof(struct sha512_hash_ctx),
		.base		=	{
			.cra_name	 = "__sha512-mb",
			.cra_driver_name = "__intel_sha512-mb",
			.cra_priority	 = 100,
			/*
			 * use ASYNC flag as some buffers in multi-buffer
			 * algo may not have completed before hashing thread
			 * sleep
			 */
			.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_INTERNAL,
			.cra_blocksize	= SHA512_BLOCK_SIZE,
			.cra_module	= THIS_MODULE,
			.cra_list	= LIST_HEAD_INIT
				(sha512_mb_areq_alg.halg.base.cra_list),
			.cra_init	= sha512_mb_areq_init_tfm,
			.cra_exit	= sha512_mb_areq_exit_tfm,
			.cra_ctxsize	= sizeof(struct sha512_hash_ctx),
		}
	}
};

static int sha512_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}

static int sha512_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}

static int sha512_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}

static int sha512_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}

static int sha512_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}

static int sha512_mb_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_export(mcryptd_req, out);
}

static int sha512_mb_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
	struct mcryptd_hash_request_ctx *rctx;
	struct ahash_request *areq;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	rctx = ahash_request_ctx(mcryptd_req);

	areq = &rctx->areq;

	ahash_request_set_tfm(areq, child);
	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req);

	return crypto_ahash_import(mcryptd_req, in);
}

static struct ahash_alg sha512_mb_async_alg = {
	.init		= sha512_mb_async_init,
	.update		= sha512_mb_async_update,
	.final		= sha512_mb_async_final,
	.finup		= sha512_mb_async_finup,
	.digest		= sha512_mb_async_digest,
	.export		= sha512_mb_async_export,
	.import		= sha512_mb_async_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct sha512_hash_ctx),
		.base = {
			.cra_name		= "sha512",
			.cra_driver_name	= "sha512_mb",
			.cra_priority		= 200,
			.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
							CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_type		= &crypto_ahash_type,
			.cra_module		= THIS_MODULE,
			.cra_list		= LIST_HEAD_INIT
				(sha512_mb_async_alg.halg.base.cra_list),
			.cra_init		= sha512_mb_async_init_tfm,
			.cra_exit		= sha512_mb_async_exit_tfm,
			.cra_ctxsize		= sizeof(struct sha512_mb_ctx),
		},
	},
};
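
/*
 * Rough usage sketch (error handling elided; my_done_cb and my_ctx are
 * placeholders): a kernel caller asking for "sha512" can be served by
 * this driver when its priority wins:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha512", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);
 *
 * Because the algorithm is CRYPTO_ALG_ASYNC, crypto_ahash_digest() may
 * return -EINPROGRESS, with the result delivered through my_done_cb.
 */
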
static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha512_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha512_hash_ctx *)
					sha512_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha512_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}

static int __init sha512_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha512_mb_alg_state.alg_cstate =
				alloc_percpu(struct mcryptd_alg_cstate);

	sha512_job_mgr_init = sha512_mb_mgr_init_avx2;
	sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2;
	sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2;
	sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2;

	if (!sha512_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha512_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr),
					 GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha512_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha512_mb_alg_state.flusher = &sha512_mb_flusher;

	err = crypto_register_ahash(&sha512_mb_areq_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha512_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_ahash(&sha512_mb_areq_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha512_mb_alg_state.alg_cstate);
	return -ENODEV;
}

static void __exit sha512_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha512_mb_async_alg);
	crypto_unregister_ahash(&sha512_mb_areq_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha512_mb_alg_state.alg_cstate);
}

module_init(sha512_mb_mod_init);
module_exit(sha512_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS("sha512");