2 * caam - Freescale FSL CAAM support for ahash functions of crypto API
4 * Copyright 2011 Freescale Semiconductor, Inc.
6 * Based on caamalg.c crypto API driver.
8 * relationship of digest job descriptor or first job descriptor after init to
11 * --------------- ---------------
12 * | JobDesc #1 |-------------------->| ShareDesc |
13 * | *(packet 1) | | (hashKey) |
14 * --------------- | (operation) |
17 * relationship of subsequent job descriptors to shared descriptors:
19 * --------------- ---------------
20 * | JobDesc #2 |-------------------->| ShareDesc |
21 * | *(packet 2) | |------------->| (hashKey) |
22 * --------------- | |-------->| (operation) |
23 * . | | | (load ctx2) |
24 * . | | ---------------
26 * | JobDesc #3 |------| |
32 * | JobDesc #4 |------------
36 * The SharedDesc never changes for a connection unless rekeyed, but
37 * each packet will likely be in a different place. So all we need
38 * to know to process the packet is where the input is, where the
39 * output goes, and what context we want to process with. Context is
40 * in the SharedDesc, packet references in the JobDesc.
42 * So, a job desc looks like:
44 * ---------------------
46 * | ShareDesc Pointer |
53 * ---------------------
60 #include "desc_constr.h"
63 #include "sg_sw_sec4.h"
66 #define CAAM_CRA_PRIORITY 3000
68 /* max hash key is max split key size */
69 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
71 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
72 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
74 /* length of descriptors text */
75 #define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
76 #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
77 #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
78 #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
79 #define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
80 #define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
82 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
83 CAAM_MAX_HASH_KEY_SIZE)
84 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
86 /* caam context sizes for hashes: running digest + 8 */
87 #define HASH_MSG_LEN 8
88 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
91 /* for print_hex_dumps with line references */
92 #define debug(format, arg...) printk(format, arg)
94 #define debug(format, arg...)
98 static struct list_head hash_list
;
100 /* ahash per-session context */
101 struct caam_hash_ctx
{
102 struct device
*jrdev
;
103 u32 sh_desc_update
[DESC_HASH_MAX_USED_LEN
];
104 u32 sh_desc_update_first
[DESC_HASH_MAX_USED_LEN
];
105 u32 sh_desc_fin
[DESC_HASH_MAX_USED_LEN
];
106 u32 sh_desc_digest
[DESC_HASH_MAX_USED_LEN
];
107 u32 sh_desc_finup
[DESC_HASH_MAX_USED_LEN
];
108 dma_addr_t sh_desc_update_dma
;
109 dma_addr_t sh_desc_update_first_dma
;
110 dma_addr_t sh_desc_fin_dma
;
111 dma_addr_t sh_desc_digest_dma
;
112 dma_addr_t sh_desc_finup_dma
;
115 u8 key
[CAAM_MAX_HASH_KEY_SIZE
];
118 unsigned int split_key_len
;
119 unsigned int split_key_pad_len
;
123 struct caam_hash_state
{
126 u8 buf_0
[CAAM_MAX_HASH_BLOCK_SIZE
] ____cacheline_aligned
;
128 u8 buf_1
[CAAM_MAX_HASH_BLOCK_SIZE
] ____cacheline_aligned
;
130 u8 caam_ctx
[MAX_CTX_LEN
] ____cacheline_aligned
;
131 int (*update
)(struct ahash_request
*req
);
132 int (*final
)(struct ahash_request
*req
);
133 int (*finup
)(struct ahash_request
*req
);
137 struct caam_export_state
{
138 u8 buf
[CAAM_MAX_HASH_BLOCK_SIZE
];
139 u8 caam_ctx
[MAX_CTX_LEN
];
141 int (*update
)(struct ahash_request
*req
);
142 int (*final
)(struct ahash_request
*req
);
143 int (*finup
)(struct ahash_request
*req
);
146 /* Common job descriptor seq in/out ptr routines */
148 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
149 static inline int map_seq_out_ptr_ctx(u32
*desc
, struct device
*jrdev
,
150 struct caam_hash_state
*state
,
153 state
->ctx_dma
= dma_map_single(jrdev
, state
->caam_ctx
,
154 ctx_len
, DMA_FROM_DEVICE
);
155 if (dma_mapping_error(jrdev
, state
->ctx_dma
)) {
156 dev_err(jrdev
, "unable to map ctx\n");
160 append_seq_out_ptr(desc
, state
->ctx_dma
, ctx_len
, 0);
165 /* Map req->result, and append seq_out_ptr command that points to it */
166 static inline dma_addr_t
map_seq_out_ptr_result(u32
*desc
, struct device
*jrdev
,
167 u8
*result
, int digestsize
)
171 dst_dma
= dma_map_single(jrdev
, result
, digestsize
, DMA_FROM_DEVICE
);
172 append_seq_out_ptr(desc
, dst_dma
, digestsize
, 0);
177 /* Map current buffer in state and put it in link table */
178 static inline dma_addr_t
buf_map_to_sec4_sg(struct device
*jrdev
,
179 struct sec4_sg_entry
*sec4_sg
,
184 buf_dma
= dma_map_single(jrdev
, buf
, buflen
, DMA_TO_DEVICE
);
185 dma_to_sec4_sg_one(sec4_sg
, buf_dma
, buflen
, 0);
190 /* Map req->src and put it in link table */
191 static inline void src_map_to_sec4_sg(struct device
*jrdev
,
192 struct scatterlist
*src
, int src_nents
,
193 struct sec4_sg_entry
*sec4_sg
)
195 dma_map_sg(jrdev
, src
, src_nents
, DMA_TO_DEVICE
);
196 sg_to_sec4_sg_last(src
, src_nents
, sec4_sg
, 0);
200 * Only put buffer in link table if it contains data, which is possible,
201 * since a buffer has previously been used, and needs to be unmapped,
203 static inline dma_addr_t
204 try_buf_map_to_sec4_sg(struct device
*jrdev
, struct sec4_sg_entry
*sec4_sg
,
205 u8
*buf
, dma_addr_t buf_dma
, int buflen
,
208 if (buf_dma
&& !dma_mapping_error(jrdev
, buf_dma
))
209 dma_unmap_single(jrdev
, buf_dma
, last_buflen
, DMA_TO_DEVICE
);
211 buf_dma
= buf_map_to_sec4_sg(jrdev
, sec4_sg
, buf
, buflen
);
218 /* Map state->caam_ctx, and add it to link table */
219 static inline int ctx_map_to_sec4_sg(u32
*desc
, struct device
*jrdev
,
220 struct caam_hash_state
*state
, int ctx_len
,
221 struct sec4_sg_entry
*sec4_sg
, u32 flag
)
223 state
->ctx_dma
= dma_map_single(jrdev
, state
->caam_ctx
, ctx_len
, flag
);
224 if (dma_mapping_error(jrdev
, state
->ctx_dma
)) {
225 dev_err(jrdev
, "unable to map ctx\n");
229 dma_to_sec4_sg_one(sec4_sg
, state
->ctx_dma
, ctx_len
, 0);
234 /* Common shared descriptor commands */
235 static inline void append_key_ahash(u32
*desc
, struct caam_hash_ctx
*ctx
)
237 append_key_as_imm(desc
, ctx
->key
, ctx
->split_key_pad_len
,
238 ctx
->split_key_len
, CLASS_2
|
239 KEY_DEST_MDHA_SPLIT
| KEY_ENC
);
242 /* Append key if it has been set */
243 static inline void init_sh_desc_key_ahash(u32
*desc
, struct caam_hash_ctx
*ctx
)
247 init_sh_desc(desc
, HDR_SHARE_SERIAL
);
249 if (ctx
->split_key_len
) {
250 /* Skip if already shared */
251 key_jump_cmd
= append_jump(desc
, JUMP_JSL
| JUMP_TEST_ALL
|
254 append_key_ahash(desc
, ctx
);
256 set_jump_tgt_here(desc
, key_jump_cmd
);
259 /* Propagate errors from shared to job descriptor */
260 append_cmd(desc
, SET_OK_NO_PROP_ERRORS
| CMD_LOAD
);
264 * For ahash read data from seqin following state->caam_ctx,
265 * and write resulting class2 context to seqout, which may be state->caam_ctx
268 static inline void ahash_append_load_str(u32
*desc
, int digestsize
)
270 /* Calculate remaining bytes to read */
271 append_math_add(desc
, VARSEQINLEN
, SEQINLEN
, REG0
, CAAM_CMD_SZ
);
273 /* Read remaining bytes */
274 append_seq_fifo_load(desc
, 0, FIFOLD_CLASS_CLASS2
| FIFOLD_TYPE_LAST2
|
275 FIFOLD_TYPE_MSG
| KEY_VLF
);
277 /* Store class2 context bytes */
278 append_seq_store(desc
, digestsize
, LDST_CLASS_2_CCB
|
279 LDST_SRCDST_BYTE_CONTEXT
);
283 * For ahash update, final and finup, import context, read and write to seqout
285 static inline void ahash_ctx_data_to_out(u32
*desc
, u32 op
, u32 state
,
287 struct caam_hash_ctx
*ctx
)
289 init_sh_desc_key_ahash(desc
, ctx
);
291 /* Import context from software */
292 append_cmd(desc
, CMD_SEQ_LOAD
| LDST_SRCDST_BYTE_CONTEXT
|
293 LDST_CLASS_2_CCB
| ctx
->ctx_len
);
295 /* Class 2 operation */
296 append_operation(desc
, op
| state
| OP_ALG_ENCRYPT
);
299 * Load from buf and/or src and write to req->result or state->context
301 ahash_append_load_str(desc
, digestsize
);
304 /* For ahash firsts and digest, read and write to seqout */
305 static inline void ahash_data_to_out(u32
*desc
, u32 op
, u32 state
,
306 int digestsize
, struct caam_hash_ctx
*ctx
)
308 init_sh_desc_key_ahash(desc
, ctx
);
310 /* Class 2 operation */
311 append_operation(desc
, op
| state
| OP_ALG_ENCRYPT
);
314 * Load from buf and/or src and write to req->result or state->context
316 ahash_append_load_str(desc
, digestsize
);
319 static int ahash_set_sh_desc(struct crypto_ahash
*ahash
)
321 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
322 int digestsize
= crypto_ahash_digestsize(ahash
);
323 struct device
*jrdev
= ctx
->jrdev
;
327 if (ctx
->split_key_len
)
328 have_key
= OP_ALG_AAI_HMAC_PRECOMP
;
330 /* ahash_update shared descriptor */
331 desc
= ctx
->sh_desc_update
;
333 init_sh_desc(desc
, HDR_SHARE_SERIAL
);
335 /* Import context from software */
336 append_cmd(desc
, CMD_SEQ_LOAD
| LDST_SRCDST_BYTE_CONTEXT
|
337 LDST_CLASS_2_CCB
| ctx
->ctx_len
);
339 /* Class 2 operation */
340 append_operation(desc
, ctx
->alg_type
| OP_ALG_AS_UPDATE
|
343 /* Load data and write to result or context */
344 ahash_append_load_str(desc
, ctx
->ctx_len
);
346 ctx
->sh_desc_update_dma
= dma_map_single(jrdev
, desc
, desc_bytes(desc
),
348 if (dma_mapping_error(jrdev
, ctx
->sh_desc_update_dma
)) {
349 dev_err(jrdev
, "unable to map shared descriptor\n");
353 print_hex_dump(KERN_ERR
,
354 "ahash update shdesc@"__stringify(__LINE__
)": ",
355 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
), 1);
358 /* ahash_update_first shared descriptor */
359 desc
= ctx
->sh_desc_update_first
;
361 ahash_data_to_out(desc
, have_key
| ctx
->alg_type
, OP_ALG_AS_INIT
,
364 ctx
->sh_desc_update_first_dma
= dma_map_single(jrdev
, desc
,
367 if (dma_mapping_error(jrdev
, ctx
->sh_desc_update_first_dma
)) {
368 dev_err(jrdev
, "unable to map shared descriptor\n");
372 print_hex_dump(KERN_ERR
,
373 "ahash update first shdesc@"__stringify(__LINE__
)": ",
374 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
), 1);
377 /* ahash_final shared descriptor */
378 desc
= ctx
->sh_desc_fin
;
380 ahash_ctx_data_to_out(desc
, have_key
| ctx
->alg_type
,
381 OP_ALG_AS_FINALIZE
, digestsize
, ctx
);
383 ctx
->sh_desc_fin_dma
= dma_map_single(jrdev
, desc
, desc_bytes(desc
),
385 if (dma_mapping_error(jrdev
, ctx
->sh_desc_fin_dma
)) {
386 dev_err(jrdev
, "unable to map shared descriptor\n");
390 print_hex_dump(KERN_ERR
, "ahash final shdesc@"__stringify(__LINE__
)": ",
391 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
392 desc_bytes(desc
), 1);
395 /* ahash_finup shared descriptor */
396 desc
= ctx
->sh_desc_finup
;
398 ahash_ctx_data_to_out(desc
, have_key
| ctx
->alg_type
,
399 OP_ALG_AS_FINALIZE
, digestsize
, ctx
);
401 ctx
->sh_desc_finup_dma
= dma_map_single(jrdev
, desc
, desc_bytes(desc
),
403 if (dma_mapping_error(jrdev
, ctx
->sh_desc_finup_dma
)) {
404 dev_err(jrdev
, "unable to map shared descriptor\n");
408 print_hex_dump(KERN_ERR
, "ahash finup shdesc@"__stringify(__LINE__
)": ",
409 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
410 desc_bytes(desc
), 1);
413 /* ahash_digest shared descriptor */
414 desc
= ctx
->sh_desc_digest
;
416 ahash_data_to_out(desc
, have_key
| ctx
->alg_type
, OP_ALG_AS_INITFINAL
,
419 ctx
->sh_desc_digest_dma
= dma_map_single(jrdev
, desc
,
422 if (dma_mapping_error(jrdev
, ctx
->sh_desc_digest_dma
)) {
423 dev_err(jrdev
, "unable to map shared descriptor\n");
427 print_hex_dump(KERN_ERR
,
428 "ahash digest shdesc@"__stringify(__LINE__
)": ",
429 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
430 desc_bytes(desc
), 1);
436 static int gen_split_hash_key(struct caam_hash_ctx
*ctx
, const u8
*key_in
,
439 return gen_split_key(ctx
->jrdev
, ctx
->key
, ctx
->split_key_len
,
440 ctx
->split_key_pad_len
, key_in
, keylen
,
444 /* Digest hash size if it is too large */
445 static int hash_digest_key(struct caam_hash_ctx
*ctx
, const u8
*key_in
,
446 u32
*keylen
, u8
*key_out
, u32 digestsize
)
448 struct device
*jrdev
= ctx
->jrdev
;
450 struct split_key_result result
;
451 dma_addr_t src_dma
, dst_dma
;
454 desc
= kmalloc(CAAM_CMD_SZ
* 8 + CAAM_PTR_SZ
* 2, GFP_KERNEL
| GFP_DMA
);
456 dev_err(jrdev
, "unable to allocate key input memory\n");
460 init_job_desc(desc
, 0);
462 src_dma
= dma_map_single(jrdev
, (void *)key_in
, *keylen
,
464 if (dma_mapping_error(jrdev
, src_dma
)) {
465 dev_err(jrdev
, "unable to map key input memory\n");
469 dst_dma
= dma_map_single(jrdev
, (void *)key_out
, digestsize
,
471 if (dma_mapping_error(jrdev
, dst_dma
)) {
472 dev_err(jrdev
, "unable to map key output memory\n");
473 dma_unmap_single(jrdev
, src_dma
, *keylen
, DMA_TO_DEVICE
);
478 /* Job descriptor to perform unkeyed hash on key_in */
479 append_operation(desc
, ctx
->alg_type
| OP_ALG_ENCRYPT
|
480 OP_ALG_AS_INITFINAL
);
481 append_seq_in_ptr(desc
, src_dma
, *keylen
, 0);
482 append_seq_fifo_load(desc
, *keylen
, FIFOLD_CLASS_CLASS2
|
483 FIFOLD_TYPE_LAST2
| FIFOLD_TYPE_MSG
);
484 append_seq_out_ptr(desc
, dst_dma
, digestsize
, 0);
485 append_seq_store(desc
, digestsize
, LDST_CLASS_2_CCB
|
486 LDST_SRCDST_BYTE_CONTEXT
);
489 print_hex_dump(KERN_ERR
, "key_in@"__stringify(__LINE__
)": ",
490 DUMP_PREFIX_ADDRESS
, 16, 4, key_in
, *keylen
, 1);
491 print_hex_dump(KERN_ERR
, "jobdesc@"__stringify(__LINE__
)": ",
492 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
), 1);
496 init_completion(&result
.completion
);
498 ret
= caam_jr_enqueue(jrdev
, desc
, split_key_done
, &result
);
501 wait_for_completion_interruptible(&result
.completion
);
504 print_hex_dump(KERN_ERR
,
505 "digested key@"__stringify(__LINE__
)": ",
506 DUMP_PREFIX_ADDRESS
, 16, 4, key_in
,
510 dma_unmap_single(jrdev
, src_dma
, *keylen
, DMA_TO_DEVICE
);
511 dma_unmap_single(jrdev
, dst_dma
, digestsize
, DMA_FROM_DEVICE
);
513 *keylen
= digestsize
;
520 static int ahash_setkey(struct crypto_ahash
*ahash
,
521 const u8
*key
, unsigned int keylen
)
523 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
524 static const u8 mdpadlen
[] = { 16, 20, 32, 32, 64, 64 };
525 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
526 struct device
*jrdev
= ctx
->jrdev
;
527 int blocksize
= crypto_tfm_alg_blocksize(&ahash
->base
);
528 int digestsize
= crypto_ahash_digestsize(ahash
);
530 u8
*hashed_key
= NULL
;
533 printk(KERN_ERR
"keylen %d\n", keylen
);
536 if (keylen
> blocksize
) {
537 hashed_key
= kmalloc(sizeof(u8
) * digestsize
, GFP_KERNEL
|
541 ret
= hash_digest_key(ctx
, key
, &keylen
, hashed_key
,
548 /* Pick class 2 key length from algorithm submask */
549 ctx
->split_key_len
= mdpadlen
[(ctx
->alg_op
& OP_ALG_ALGSEL_SUBMASK
) >>
550 OP_ALG_ALGSEL_SHIFT
] * 2;
551 ctx
->split_key_pad_len
= ALIGN(ctx
->split_key_len
, 16);
554 printk(KERN_ERR
"split_key_len %d split_key_pad_len %d\n",
555 ctx
->split_key_len
, ctx
->split_key_pad_len
);
556 print_hex_dump(KERN_ERR
, "key in @"__stringify(__LINE__
)": ",
557 DUMP_PREFIX_ADDRESS
, 16, 4, key
, keylen
, 1);
560 ret
= gen_split_hash_key(ctx
, key
, keylen
);
564 ctx
->key_dma
= dma_map_single(jrdev
, ctx
->key
, ctx
->split_key_pad_len
,
566 if (dma_mapping_error(jrdev
, ctx
->key_dma
)) {
567 dev_err(jrdev
, "unable to map key i/o memory\n");
572 print_hex_dump(KERN_ERR
, "ctx.key@"__stringify(__LINE__
)": ",
573 DUMP_PREFIX_ADDRESS
, 16, 4, ctx
->key
,
574 ctx
->split_key_pad_len
, 1);
577 ret
= ahash_set_sh_desc(ahash
);
579 dma_unmap_single(jrdev
, ctx
->key_dma
, ctx
->split_key_pad_len
,
588 crypto_ahash_set_flags(ahash
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
593 * ahash_edesc - s/w-extended ahash descriptor
594 * @dst_dma: physical mapped address of req->result
595 * @sec4_sg_dma: physical mapped address of h/w link table
596 * @src_nents: number of segments in input scatterlist
597 * @sec4_sg_bytes: length of dma mapped sec4_sg space
598 * @sec4_sg: pointer to h/w link table
599 * @hw_desc: the h/w job descriptor followed by any referenced link tables
603 dma_addr_t sec4_sg_dma
;
606 struct sec4_sg_entry
*sec4_sg
;
610 static inline void ahash_unmap(struct device
*dev
,
611 struct ahash_edesc
*edesc
,
612 struct ahash_request
*req
, int dst_len
)
614 if (edesc
->src_nents
)
615 dma_unmap_sg(dev
, req
->src
, edesc
->src_nents
, DMA_TO_DEVICE
);
617 dma_unmap_single(dev
, edesc
->dst_dma
, dst_len
, DMA_FROM_DEVICE
);
619 if (edesc
->sec4_sg_bytes
)
620 dma_unmap_single(dev
, edesc
->sec4_sg_dma
,
621 edesc
->sec4_sg_bytes
, DMA_TO_DEVICE
);
624 static inline void ahash_unmap_ctx(struct device
*dev
,
625 struct ahash_edesc
*edesc
,
626 struct ahash_request
*req
, int dst_len
, u32 flag
)
628 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
629 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
630 struct caam_hash_state
*state
= ahash_request_ctx(req
);
633 dma_unmap_single(dev
, state
->ctx_dma
, ctx
->ctx_len
, flag
);
634 ahash_unmap(dev
, edesc
, req
, dst_len
);
637 static void ahash_done(struct device
*jrdev
, u32
*desc
, u32 err
,
640 struct ahash_request
*req
= context
;
641 struct ahash_edesc
*edesc
;
642 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
643 int digestsize
= crypto_ahash_digestsize(ahash
);
645 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
646 struct caam_hash_state
*state
= ahash_request_ctx(req
);
648 dev_err(jrdev
, "%s %d: err 0x%x\n", __func__
, __LINE__
, err
);
651 edesc
= (struct ahash_edesc
*)((char *)desc
-
652 offsetof(struct ahash_edesc
, hw_desc
));
654 caam_jr_strstatus(jrdev
, err
);
656 ahash_unmap(jrdev
, edesc
, req
, digestsize
);
660 print_hex_dump(KERN_ERR
, "ctx@"__stringify(__LINE__
)": ",
661 DUMP_PREFIX_ADDRESS
, 16, 4, state
->caam_ctx
,
664 print_hex_dump(KERN_ERR
, "result@"__stringify(__LINE__
)": ",
665 DUMP_PREFIX_ADDRESS
, 16, 4, req
->result
,
669 req
->base
.complete(&req
->base
, err
);
672 static void ahash_done_bi(struct device
*jrdev
, u32
*desc
, u32 err
,
675 struct ahash_request
*req
= context
;
676 struct ahash_edesc
*edesc
;
677 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
678 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
680 struct caam_hash_state
*state
= ahash_request_ctx(req
);
681 int digestsize
= crypto_ahash_digestsize(ahash
);
683 dev_err(jrdev
, "%s %d: err 0x%x\n", __func__
, __LINE__
, err
);
686 edesc
= (struct ahash_edesc
*)((char *)desc
-
687 offsetof(struct ahash_edesc
, hw_desc
));
689 caam_jr_strstatus(jrdev
, err
);
691 ahash_unmap_ctx(jrdev
, edesc
, req
, ctx
->ctx_len
, DMA_BIDIRECTIONAL
);
695 print_hex_dump(KERN_ERR
, "ctx@"__stringify(__LINE__
)": ",
696 DUMP_PREFIX_ADDRESS
, 16, 4, state
->caam_ctx
,
699 print_hex_dump(KERN_ERR
, "result@"__stringify(__LINE__
)": ",
700 DUMP_PREFIX_ADDRESS
, 16, 4, req
->result
,
704 req
->base
.complete(&req
->base
, err
);
707 static void ahash_done_ctx_src(struct device
*jrdev
, u32
*desc
, u32 err
,
710 struct ahash_request
*req
= context
;
711 struct ahash_edesc
*edesc
;
712 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
713 int digestsize
= crypto_ahash_digestsize(ahash
);
715 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
716 struct caam_hash_state
*state
= ahash_request_ctx(req
);
718 dev_err(jrdev
, "%s %d: err 0x%x\n", __func__
, __LINE__
, err
);
721 edesc
= (struct ahash_edesc
*)((char *)desc
-
722 offsetof(struct ahash_edesc
, hw_desc
));
724 caam_jr_strstatus(jrdev
, err
);
726 ahash_unmap_ctx(jrdev
, edesc
, req
, digestsize
, DMA_TO_DEVICE
);
730 print_hex_dump(KERN_ERR
, "ctx@"__stringify(__LINE__
)": ",
731 DUMP_PREFIX_ADDRESS
, 16, 4, state
->caam_ctx
,
734 print_hex_dump(KERN_ERR
, "result@"__stringify(__LINE__
)": ",
735 DUMP_PREFIX_ADDRESS
, 16, 4, req
->result
,
739 req
->base
.complete(&req
->base
, err
);
742 static void ahash_done_ctx_dst(struct device
*jrdev
, u32
*desc
, u32 err
,
745 struct ahash_request
*req
= context
;
746 struct ahash_edesc
*edesc
;
747 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
748 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
750 struct caam_hash_state
*state
= ahash_request_ctx(req
);
751 int digestsize
= crypto_ahash_digestsize(ahash
);
753 dev_err(jrdev
, "%s %d: err 0x%x\n", __func__
, __LINE__
, err
);
756 edesc
= (struct ahash_edesc
*)((char *)desc
-
757 offsetof(struct ahash_edesc
, hw_desc
));
759 caam_jr_strstatus(jrdev
, err
);
761 ahash_unmap_ctx(jrdev
, edesc
, req
, ctx
->ctx_len
, DMA_FROM_DEVICE
);
765 print_hex_dump(KERN_ERR
, "ctx@"__stringify(__LINE__
)": ",
766 DUMP_PREFIX_ADDRESS
, 16, 4, state
->caam_ctx
,
769 print_hex_dump(KERN_ERR
, "result@"__stringify(__LINE__
)": ",
770 DUMP_PREFIX_ADDRESS
, 16, 4, req
->result
,
774 req
->base
.complete(&req
->base
, err
);
777 /* submit update job descriptor */
778 static int ahash_update_ctx(struct ahash_request
*req
)
780 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
781 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
782 struct caam_hash_state
*state
= ahash_request_ctx(req
);
783 struct device
*jrdev
= ctx
->jrdev
;
784 gfp_t flags
= (req
->base
.flags
& (CRYPTO_TFM_REQ_MAY_BACKLOG
|
785 CRYPTO_TFM_REQ_MAY_SLEEP
)) ? GFP_KERNEL
: GFP_ATOMIC
;
786 u8
*buf
= state
->current_buf
? state
->buf_1
: state
->buf_0
;
787 int *buflen
= state
->current_buf
? &state
->buflen_1
: &state
->buflen_0
;
788 u8
*next_buf
= state
->current_buf
? state
->buf_0
: state
->buf_1
;
789 int *next_buflen
= state
->current_buf
? &state
->buflen_0
:
790 &state
->buflen_1
, last_buflen
;
791 int in_len
= *buflen
+ req
->nbytes
, to_hash
;
792 u32
*sh_desc
= ctx
->sh_desc_update
, *desc
;
793 dma_addr_t ptr
= ctx
->sh_desc_update_dma
;
794 int src_nents
, sec4_sg_bytes
, sec4_sg_src_index
;
795 struct ahash_edesc
*edesc
;
799 last_buflen
= *next_buflen
;
800 *next_buflen
= in_len
& (crypto_tfm_alg_blocksize(&ahash
->base
) - 1);
801 to_hash
= in_len
- *next_buflen
;
804 src_nents
= sg_nents_for_len(req
->src
,
805 req
->nbytes
- (*next_buflen
));
807 dev_err(jrdev
, "Invalid number of src SG.\n");
810 sec4_sg_src_index
= 1 + (*buflen
? 1 : 0);
811 sec4_sg_bytes
= (sec4_sg_src_index
+ src_nents
) *
812 sizeof(struct sec4_sg_entry
);
815 * allocate space for base edesc and hw desc commands,
818 edesc
= kzalloc(sizeof(*edesc
) + DESC_JOB_IO_LEN
+
819 sec4_sg_bytes
, GFP_DMA
| flags
);
822 "could not allocate extended descriptor\n");
826 edesc
->src_nents
= src_nents
;
827 edesc
->sec4_sg_bytes
= sec4_sg_bytes
;
828 edesc
->sec4_sg
= (void *)edesc
+ sizeof(struct ahash_edesc
) +
831 ret
= ctx_map_to_sec4_sg(desc
, jrdev
, state
, ctx
->ctx_len
,
832 edesc
->sec4_sg
, DMA_BIDIRECTIONAL
);
836 state
->buf_dma
= try_buf_map_to_sec4_sg(jrdev
,
839 *buflen
, last_buflen
);
842 src_map_to_sec4_sg(jrdev
, req
->src
, src_nents
,
843 edesc
->sec4_sg
+ sec4_sg_src_index
);
845 scatterwalk_map_and_copy(next_buf
, req
->src
,
849 (edesc
->sec4_sg
+ sec4_sg_src_index
- 1)->len
|=
850 cpu_to_caam32(SEC4_SG_LEN_FIN
);
853 state
->current_buf
= !state
->current_buf
;
855 sh_len
= desc_len(sh_desc
);
856 desc
= edesc
->hw_desc
;
857 init_job_desc_shared(desc
, ptr
, sh_len
, HDR_SHARE_DEFER
|
860 edesc
->sec4_sg_dma
= dma_map_single(jrdev
, edesc
->sec4_sg
,
863 if (dma_mapping_error(jrdev
, edesc
->sec4_sg_dma
)) {
864 dev_err(jrdev
, "unable to map S/G table\n");
868 append_seq_in_ptr(desc
, edesc
->sec4_sg_dma
, ctx
->ctx_len
+
871 append_seq_out_ptr(desc
, state
->ctx_dma
, ctx
->ctx_len
, 0);
874 print_hex_dump(KERN_ERR
, "jobdesc@"__stringify(__LINE__
)": ",
875 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
876 desc_bytes(desc
), 1);
879 ret
= caam_jr_enqueue(jrdev
, desc
, ahash_done_bi
, req
);
883 ahash_unmap_ctx(jrdev
, edesc
, req
, ctx
->ctx_len
,
887 } else if (*next_buflen
) {
888 scatterwalk_map_and_copy(buf
+ *buflen
, req
->src
, 0,
890 *buflen
= *next_buflen
;
891 *next_buflen
= last_buflen
;
894 print_hex_dump(KERN_ERR
, "buf@"__stringify(__LINE__
)": ",
895 DUMP_PREFIX_ADDRESS
, 16, 4, buf
, *buflen
, 1);
896 print_hex_dump(KERN_ERR
, "next buf@"__stringify(__LINE__
)": ",
897 DUMP_PREFIX_ADDRESS
, 16, 4, next_buf
,
904 static int ahash_final_ctx(struct ahash_request
*req
)
906 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
907 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
908 struct caam_hash_state
*state
= ahash_request_ctx(req
);
909 struct device
*jrdev
= ctx
->jrdev
;
910 gfp_t flags
= (req
->base
.flags
& (CRYPTO_TFM_REQ_MAY_BACKLOG
|
911 CRYPTO_TFM_REQ_MAY_SLEEP
)) ? GFP_KERNEL
: GFP_ATOMIC
;
912 u8
*buf
= state
->current_buf
? state
->buf_1
: state
->buf_0
;
913 int buflen
= state
->current_buf
? state
->buflen_1
: state
->buflen_0
;
914 int last_buflen
= state
->current_buf
? state
->buflen_0
:
916 u32
*sh_desc
= ctx
->sh_desc_fin
, *desc
;
917 dma_addr_t ptr
= ctx
->sh_desc_fin_dma
;
918 int sec4_sg_bytes
, sec4_sg_src_index
;
919 int digestsize
= crypto_ahash_digestsize(ahash
);
920 struct ahash_edesc
*edesc
;
924 sec4_sg_src_index
= 1 + (buflen
? 1 : 0);
925 sec4_sg_bytes
= sec4_sg_src_index
* sizeof(struct sec4_sg_entry
);
927 /* allocate space for base edesc and hw desc commands, link tables */
928 edesc
= kzalloc(sizeof(*edesc
) + DESC_JOB_IO_LEN
+ sec4_sg_bytes
,
931 dev_err(jrdev
, "could not allocate extended descriptor\n");
935 sh_len
= desc_len(sh_desc
);
936 desc
= edesc
->hw_desc
;
937 init_job_desc_shared(desc
, ptr
, sh_len
, HDR_SHARE_DEFER
| HDR_REVERSE
);
939 edesc
->sec4_sg_bytes
= sec4_sg_bytes
;
940 edesc
->sec4_sg
= (void *)edesc
+ sizeof(struct ahash_edesc
) +
942 edesc
->src_nents
= 0;
944 ret
= ctx_map_to_sec4_sg(desc
, jrdev
, state
, ctx
->ctx_len
,
945 edesc
->sec4_sg
, DMA_TO_DEVICE
);
949 state
->buf_dma
= try_buf_map_to_sec4_sg(jrdev
, edesc
->sec4_sg
+ 1,
950 buf
, state
->buf_dma
, buflen
,
952 (edesc
->sec4_sg
+ sec4_sg_src_index
- 1)->len
|=
953 cpu_to_caam32(SEC4_SG_LEN_FIN
);
955 edesc
->sec4_sg_dma
= dma_map_single(jrdev
, edesc
->sec4_sg
,
956 sec4_sg_bytes
, DMA_TO_DEVICE
);
957 if (dma_mapping_error(jrdev
, edesc
->sec4_sg_dma
)) {
958 dev_err(jrdev
, "unable to map S/G table\n");
962 append_seq_in_ptr(desc
, edesc
->sec4_sg_dma
, ctx
->ctx_len
+ buflen
,
965 edesc
->dst_dma
= map_seq_out_ptr_result(desc
, jrdev
, req
->result
,
967 if (dma_mapping_error(jrdev
, edesc
->dst_dma
)) {
968 dev_err(jrdev
, "unable to map dst\n");
973 print_hex_dump(KERN_ERR
, "jobdesc@"__stringify(__LINE__
)": ",
974 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
), 1);
977 ret
= caam_jr_enqueue(jrdev
, desc
, ahash_done_ctx_src
, req
);
981 ahash_unmap_ctx(jrdev
, edesc
, req
, digestsize
, DMA_FROM_DEVICE
);
988 static int ahash_finup_ctx(struct ahash_request
*req
)
990 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
991 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
992 struct caam_hash_state
*state
= ahash_request_ctx(req
);
993 struct device
*jrdev
= ctx
->jrdev
;
994 gfp_t flags
= (req
->base
.flags
& (CRYPTO_TFM_REQ_MAY_BACKLOG
|
995 CRYPTO_TFM_REQ_MAY_SLEEP
)) ? GFP_KERNEL
: GFP_ATOMIC
;
996 u8
*buf
= state
->current_buf
? state
->buf_1
: state
->buf_0
;
997 int buflen
= state
->current_buf
? state
->buflen_1
: state
->buflen_0
;
998 int last_buflen
= state
->current_buf
? state
->buflen_0
:
1000 u32
*sh_desc
= ctx
->sh_desc_finup
, *desc
;
1001 dma_addr_t ptr
= ctx
->sh_desc_finup_dma
;
1002 int sec4_sg_bytes
, sec4_sg_src_index
;
1004 int digestsize
= crypto_ahash_digestsize(ahash
);
1005 struct ahash_edesc
*edesc
;
1009 src_nents
= sg_nents_for_len(req
->src
, req
->nbytes
);
1010 if (src_nents
< 0) {
1011 dev_err(jrdev
, "Invalid number of src SG.\n");
1014 sec4_sg_src_index
= 1 + (buflen
? 1 : 0);
1015 sec4_sg_bytes
= (sec4_sg_src_index
+ src_nents
) *
1016 sizeof(struct sec4_sg_entry
);
1018 /* allocate space for base edesc and hw desc commands, link tables */
1019 edesc
= kzalloc(sizeof(*edesc
) + DESC_JOB_IO_LEN
+ sec4_sg_bytes
,
1022 dev_err(jrdev
, "could not allocate extended descriptor\n");
1026 sh_len
= desc_len(sh_desc
);
1027 desc
= edesc
->hw_desc
;
1028 init_job_desc_shared(desc
, ptr
, sh_len
, HDR_SHARE_DEFER
| HDR_REVERSE
);
1030 edesc
->src_nents
= src_nents
;
1031 edesc
->sec4_sg_bytes
= sec4_sg_bytes
;
1032 edesc
->sec4_sg
= (void *)edesc
+ sizeof(struct ahash_edesc
) +
1035 ret
= ctx_map_to_sec4_sg(desc
, jrdev
, state
, ctx
->ctx_len
,
1036 edesc
->sec4_sg
, DMA_TO_DEVICE
);
1040 state
->buf_dma
= try_buf_map_to_sec4_sg(jrdev
, edesc
->sec4_sg
+ 1,
1041 buf
, state
->buf_dma
, buflen
,
1044 src_map_to_sec4_sg(jrdev
, req
->src
, src_nents
, edesc
->sec4_sg
+
1047 edesc
->sec4_sg_dma
= dma_map_single(jrdev
, edesc
->sec4_sg
,
1048 sec4_sg_bytes
, DMA_TO_DEVICE
);
1049 if (dma_mapping_error(jrdev
, edesc
->sec4_sg_dma
)) {
1050 dev_err(jrdev
, "unable to map S/G table\n");
1054 append_seq_in_ptr(desc
, edesc
->sec4_sg_dma
, ctx
->ctx_len
+
1055 buflen
+ req
->nbytes
, LDST_SGF
);
1057 edesc
->dst_dma
= map_seq_out_ptr_result(desc
, jrdev
, req
->result
,
1059 if (dma_mapping_error(jrdev
, edesc
->dst_dma
)) {
1060 dev_err(jrdev
, "unable to map dst\n");
1065 print_hex_dump(KERN_ERR
, "jobdesc@"__stringify(__LINE__
)": ",
1066 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
), 1);
1069 ret
= caam_jr_enqueue(jrdev
, desc
, ahash_done_ctx_src
, req
);
1073 ahash_unmap_ctx(jrdev
, edesc
, req
, digestsize
, DMA_FROM_DEVICE
);
1080 static int ahash_digest(struct ahash_request
*req
)
1082 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1083 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1084 struct device
*jrdev
= ctx
->jrdev
;
1085 gfp_t flags
= (req
->base
.flags
& (CRYPTO_TFM_REQ_MAY_BACKLOG
|
1086 CRYPTO_TFM_REQ_MAY_SLEEP
)) ? GFP_KERNEL
: GFP_ATOMIC
;
1087 u32
*sh_desc
= ctx
->sh_desc_digest
, *desc
;
1088 dma_addr_t ptr
= ctx
->sh_desc_digest_dma
;
1089 int digestsize
= crypto_ahash_digestsize(ahash
);
1090 int src_nents
, sec4_sg_bytes
;
1092 struct ahash_edesc
*edesc
;
1097 src_nents
= sg_count(req
->src
, req
->nbytes
);
1098 if (src_nents
< 0) {
1099 dev_err(jrdev
, "Invalid number of src SG.\n");
1102 dma_map_sg(jrdev
, req
->src
, src_nents
? : 1, DMA_TO_DEVICE
);
1103 sec4_sg_bytes
= src_nents
* sizeof(struct sec4_sg_entry
);
1105 /* allocate space for base edesc and hw desc commands, link tables */
1106 edesc
= kzalloc(sizeof(*edesc
) + sec4_sg_bytes
+ DESC_JOB_IO_LEN
,
1109 dev_err(jrdev
, "could not allocate extended descriptor\n");
1112 edesc
->sec4_sg
= (void *)edesc
+ sizeof(struct ahash_edesc
) +
1114 edesc
->sec4_sg_bytes
= sec4_sg_bytes
;
1115 edesc
->src_nents
= src_nents
;
1117 sh_len
= desc_len(sh_desc
);
1118 desc
= edesc
->hw_desc
;
1119 init_job_desc_shared(desc
, ptr
, sh_len
, HDR_SHARE_DEFER
| HDR_REVERSE
);
1122 sg_to_sec4_sg_last(req
->src
, src_nents
, edesc
->sec4_sg
, 0);
1123 edesc
->sec4_sg_dma
= dma_map_single(jrdev
, edesc
->sec4_sg
,
1124 sec4_sg_bytes
, DMA_TO_DEVICE
);
1125 if (dma_mapping_error(jrdev
, edesc
->sec4_sg_dma
)) {
1126 dev_err(jrdev
, "unable to map S/G table\n");
1129 src_dma
= edesc
->sec4_sg_dma
;
1132 src_dma
= sg_dma_address(req
->src
);
1135 append_seq_in_ptr(desc
, src_dma
, req
->nbytes
, options
);
1137 edesc
->dst_dma
= map_seq_out_ptr_result(desc
, jrdev
, req
->result
,
1139 if (dma_mapping_error(jrdev
, edesc
->dst_dma
)) {
1140 dev_err(jrdev
, "unable to map dst\n");
1145 print_hex_dump(KERN_ERR
, "jobdesc@"__stringify(__LINE__
)": ",
1146 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
), 1);
1149 ret
= caam_jr_enqueue(jrdev
, desc
, ahash_done
, req
);
1153 ahash_unmap(jrdev
, edesc
, req
, digestsize
);
1160 /* submit ahash final if it the first job descriptor */
1161 static int ahash_final_no_ctx(struct ahash_request
*req
)
1163 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1164 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1165 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1166 struct device
*jrdev
= ctx
->jrdev
;
1167 gfp_t flags
= (req
->base
.flags
& (CRYPTO_TFM_REQ_MAY_BACKLOG
|
1168 CRYPTO_TFM_REQ_MAY_SLEEP
)) ? GFP_KERNEL
: GFP_ATOMIC
;
1169 u8
*buf
= state
->current_buf
? state
->buf_1
: state
->buf_0
;
1170 int buflen
= state
->current_buf
? state
->buflen_1
: state
->buflen_0
;
1171 u32
*sh_desc
= ctx
->sh_desc_digest
, *desc
;
1172 dma_addr_t ptr
= ctx
->sh_desc_digest_dma
;
1173 int digestsize
= crypto_ahash_digestsize(ahash
);
1174 struct ahash_edesc
*edesc
;
1178 /* allocate space for base edesc and hw desc commands, link tables */
1179 edesc
= kzalloc(sizeof(*edesc
) + DESC_JOB_IO_LEN
, GFP_DMA
| flags
);
1181 dev_err(jrdev
, "could not allocate extended descriptor\n");
1185 edesc
->sec4_sg_bytes
= 0;
1186 sh_len
= desc_len(sh_desc
);
1187 desc
= edesc
->hw_desc
;
1188 init_job_desc_shared(desc
, ptr
, sh_len
, HDR_SHARE_DEFER
| HDR_REVERSE
);
1190 state
->buf_dma
= dma_map_single(jrdev
, buf
, buflen
, DMA_TO_DEVICE
);
1191 if (dma_mapping_error(jrdev
, state
->buf_dma
)) {
1192 dev_err(jrdev
, "unable to map src\n");
1196 append_seq_in_ptr(desc
, state
->buf_dma
, buflen
, 0);
1198 edesc
->dst_dma
= map_seq_out_ptr_result(desc
, jrdev
, req
->result
,
1200 if (dma_mapping_error(jrdev
, edesc
->dst_dma
)) {
1201 dev_err(jrdev
, "unable to map dst\n");
1204 edesc
->src_nents
= 0;
1207 print_hex_dump(KERN_ERR
, "jobdesc@"__stringify(__LINE__
)": ",
1208 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
), 1);
1211 ret
= caam_jr_enqueue(jrdev
, desc
, ahash_done
, req
);
1215 ahash_unmap(jrdev
, edesc
, req
, digestsize
);
1222 /* submit ahash update if it the first job descriptor after update */
1223 static int ahash_update_no_ctx(struct ahash_request
*req
)
1225 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1226 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1227 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1228 struct device
*jrdev
= ctx
->jrdev
;
1229 gfp_t flags
= (req
->base
.flags
& (CRYPTO_TFM_REQ_MAY_BACKLOG
|
1230 CRYPTO_TFM_REQ_MAY_SLEEP
)) ? GFP_KERNEL
: GFP_ATOMIC
;
1231 u8
*buf
= state
->current_buf
? state
->buf_1
: state
->buf_0
;
1232 int *buflen
= state
->current_buf
? &state
->buflen_1
: &state
->buflen_0
;
1233 u8
*next_buf
= state
->current_buf
? state
->buf_0
: state
->buf_1
;
1234 int *next_buflen
= state
->current_buf
? &state
->buflen_0
:
1236 int in_len
= *buflen
+ req
->nbytes
, to_hash
;
1237 int sec4_sg_bytes
, src_nents
;
1238 struct ahash_edesc
*edesc
;
1239 u32
*desc
, *sh_desc
= ctx
->sh_desc_update_first
;
1240 dma_addr_t ptr
= ctx
->sh_desc_update_first_dma
;
1244 *next_buflen
= in_len
& (crypto_tfm_alg_blocksize(&ahash
->base
) - 1);
1245 to_hash
= in_len
- *next_buflen
;
1248 src_nents
= sg_nents_for_len(req
->src
,
1249 req
->nbytes
- (*next_buflen
));
1250 if (src_nents
< 0) {
1251 dev_err(jrdev
, "Invalid number of src SG.\n");
1254 sec4_sg_bytes
= (1 + src_nents
) *
1255 sizeof(struct sec4_sg_entry
);
1258 * allocate space for base edesc and hw desc commands,
1261 edesc
= kzalloc(sizeof(*edesc
) + DESC_JOB_IO_LEN
+
1262 sec4_sg_bytes
, GFP_DMA
| flags
);
1265 "could not allocate extended descriptor\n");
1269 edesc
->src_nents
= src_nents
;
1270 edesc
->sec4_sg_bytes
= sec4_sg_bytes
;
1271 edesc
->sec4_sg
= (void *)edesc
+ sizeof(struct ahash_edesc
) +
1275 state
->buf_dma
= buf_map_to_sec4_sg(jrdev
, edesc
->sec4_sg
,
1277 src_map_to_sec4_sg(jrdev
, req
->src
, src_nents
,
1278 edesc
->sec4_sg
+ 1);
1280 scatterwalk_map_and_copy(next_buf
, req
->src
,
1285 state
->current_buf
= !state
->current_buf
;
1287 sh_len
= desc_len(sh_desc
);
1288 desc
= edesc
->hw_desc
;
1289 init_job_desc_shared(desc
, ptr
, sh_len
, HDR_SHARE_DEFER
|
1292 edesc
->sec4_sg_dma
= dma_map_single(jrdev
, edesc
->sec4_sg
,
1295 if (dma_mapping_error(jrdev
, edesc
->sec4_sg_dma
)) {
1296 dev_err(jrdev
, "unable to map S/G table\n");
1300 append_seq_in_ptr(desc
, edesc
->sec4_sg_dma
, to_hash
, LDST_SGF
);
1302 ret
= map_seq_out_ptr_ctx(desc
, jrdev
, state
, ctx
->ctx_len
);
1307 print_hex_dump(KERN_ERR
, "jobdesc@"__stringify(__LINE__
)": ",
1308 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
1309 desc_bytes(desc
), 1);
1312 ret
= caam_jr_enqueue(jrdev
, desc
, ahash_done_ctx_dst
, req
);
1315 state
->update
= ahash_update_ctx
;
1316 state
->finup
= ahash_finup_ctx
;
1317 state
->final
= ahash_final_ctx
;
1319 ahash_unmap_ctx(jrdev
, edesc
, req
, ctx
->ctx_len
,
1323 } else if (*next_buflen
) {
1324 scatterwalk_map_and_copy(buf
+ *buflen
, req
->src
, 0,
1326 *buflen
= *next_buflen
;
1330 print_hex_dump(KERN_ERR
, "buf@"__stringify(__LINE__
)": ",
1331 DUMP_PREFIX_ADDRESS
, 16, 4, buf
, *buflen
, 1);
1332 print_hex_dump(KERN_ERR
, "next buf@"__stringify(__LINE__
)": ",
1333 DUMP_PREFIX_ADDRESS
, 16, 4, next_buf
,
1340 /* submit ahash finup if it the first job descriptor after update */
1341 static int ahash_finup_no_ctx(struct ahash_request
*req
)
1343 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1344 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1345 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1346 struct device
*jrdev
= ctx
->jrdev
;
1347 gfp_t flags
= (req
->base
.flags
& (CRYPTO_TFM_REQ_MAY_BACKLOG
|
1348 CRYPTO_TFM_REQ_MAY_SLEEP
)) ? GFP_KERNEL
: GFP_ATOMIC
;
1349 u8
*buf
= state
->current_buf
? state
->buf_1
: state
->buf_0
;
1350 int buflen
= state
->current_buf
? state
->buflen_1
: state
->buflen_0
;
1351 int last_buflen
= state
->current_buf
? state
->buflen_0
:
1353 u32
*sh_desc
= ctx
->sh_desc_digest
, *desc
;
1354 dma_addr_t ptr
= ctx
->sh_desc_digest_dma
;
1355 int sec4_sg_bytes
, sec4_sg_src_index
, src_nents
;
1356 int digestsize
= crypto_ahash_digestsize(ahash
);
1357 struct ahash_edesc
*edesc
;
1361 src_nents
= sg_nents_for_len(req
->src
, req
->nbytes
);
1362 if (src_nents
< 0) {
1363 dev_err(jrdev
, "Invalid number of src SG.\n");
1366 sec4_sg_src_index
= 2;
1367 sec4_sg_bytes
= (sec4_sg_src_index
+ src_nents
) *
1368 sizeof(struct sec4_sg_entry
);
1370 /* allocate space for base edesc and hw desc commands, link tables */
1371 edesc
= kzalloc(sizeof(*edesc
) + DESC_JOB_IO_LEN
+ sec4_sg_bytes
,
1374 dev_err(jrdev
, "could not allocate extended descriptor\n");
1378 sh_len
= desc_len(sh_desc
);
1379 desc
= edesc
->hw_desc
;
1380 init_job_desc_shared(desc
, ptr
, sh_len
, HDR_SHARE_DEFER
| HDR_REVERSE
);
1382 edesc
->src_nents
= src_nents
;
1383 edesc
->sec4_sg_bytes
= sec4_sg_bytes
;
1384 edesc
->sec4_sg
= (void *)edesc
+ sizeof(struct ahash_edesc
) +
1387 state
->buf_dma
= try_buf_map_to_sec4_sg(jrdev
, edesc
->sec4_sg
, buf
,
1388 state
->buf_dma
, buflen
,
1391 src_map_to_sec4_sg(jrdev
, req
->src
, src_nents
, edesc
->sec4_sg
+ 1);
1393 edesc
->sec4_sg_dma
= dma_map_single(jrdev
, edesc
->sec4_sg
,
1394 sec4_sg_bytes
, DMA_TO_DEVICE
);
1395 if (dma_mapping_error(jrdev
, edesc
->sec4_sg_dma
)) {
1396 dev_err(jrdev
, "unable to map S/G table\n");
1400 append_seq_in_ptr(desc
, edesc
->sec4_sg_dma
, buflen
+
1401 req
->nbytes
, LDST_SGF
);
1403 edesc
->dst_dma
= map_seq_out_ptr_result(desc
, jrdev
, req
->result
,
1405 if (dma_mapping_error(jrdev
, edesc
->dst_dma
)) {
1406 dev_err(jrdev
, "unable to map dst\n");
1411 print_hex_dump(KERN_ERR
, "jobdesc@"__stringify(__LINE__
)": ",
1412 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
), 1);
1415 ret
= caam_jr_enqueue(jrdev
, desc
, ahash_done
, req
);
1419 ahash_unmap(jrdev
, edesc
, req
, digestsize
);
1426 /* submit first update job descriptor after init */
1427 static int ahash_update_first(struct ahash_request
*req
)
1429 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1430 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1431 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1432 struct device
*jrdev
= ctx
->jrdev
;
1433 gfp_t flags
= (req
->base
.flags
& (CRYPTO_TFM_REQ_MAY_BACKLOG
|
1434 CRYPTO_TFM_REQ_MAY_SLEEP
)) ? GFP_KERNEL
: GFP_ATOMIC
;
1435 u8
*next_buf
= state
->current_buf
? state
->buf_1
: state
->buf_0
;
1436 int *next_buflen
= state
->current_buf
?
1437 &state
->buflen_1
: &state
->buflen_0
;
1439 u32
*sh_desc
= ctx
->sh_desc_update_first
, *desc
;
1440 dma_addr_t ptr
= ctx
->sh_desc_update_first_dma
;
1441 int sec4_sg_bytes
, src_nents
;
1444 struct ahash_edesc
*edesc
;
1448 *next_buflen
= req
->nbytes
& (crypto_tfm_alg_blocksize(&ahash
->base
) -
1450 to_hash
= req
->nbytes
- *next_buflen
;
1453 src_nents
= sg_count(req
->src
, req
->nbytes
- (*next_buflen
));
1454 if (src_nents
< 0) {
1455 dev_err(jrdev
, "Invalid number of src SG.\n");
1458 dma_map_sg(jrdev
, req
->src
, src_nents
? : 1, DMA_TO_DEVICE
);
1459 sec4_sg_bytes
= src_nents
* sizeof(struct sec4_sg_entry
);
1462 * allocate space for base edesc and hw desc commands,
1465 edesc
= kzalloc(sizeof(*edesc
) + DESC_JOB_IO_LEN
+
1466 sec4_sg_bytes
, GFP_DMA
| flags
);
1469 "could not allocate extended descriptor\n");
1473 edesc
->src_nents
= src_nents
;
1474 edesc
->sec4_sg_bytes
= sec4_sg_bytes
;
1475 edesc
->sec4_sg
= (void *)edesc
+ sizeof(struct ahash_edesc
) +
1480 sg_to_sec4_sg_last(req
->src
, src_nents
,
1482 edesc
->sec4_sg_dma
= dma_map_single(jrdev
,
1486 if (dma_mapping_error(jrdev
, edesc
->sec4_sg_dma
)) {
1487 dev_err(jrdev
, "unable to map S/G table\n");
1490 src_dma
= edesc
->sec4_sg_dma
;
1493 src_dma
= sg_dma_address(req
->src
);
1498 scatterwalk_map_and_copy(next_buf
, req
->src
, to_hash
,
1501 sh_len
= desc_len(sh_desc
);
1502 desc
= edesc
->hw_desc
;
1503 init_job_desc_shared(desc
, ptr
, sh_len
, HDR_SHARE_DEFER
|
1506 append_seq_in_ptr(desc
, src_dma
, to_hash
, options
);
1508 ret
= map_seq_out_ptr_ctx(desc
, jrdev
, state
, ctx
->ctx_len
);
1513 print_hex_dump(KERN_ERR
, "jobdesc@"__stringify(__LINE__
)": ",
1514 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
1515 desc_bytes(desc
), 1);
1518 ret
= caam_jr_enqueue(jrdev
, desc
, ahash_done_ctx_dst
,
1522 state
->update
= ahash_update_ctx
;
1523 state
->finup
= ahash_finup_ctx
;
1524 state
->final
= ahash_final_ctx
;
1526 ahash_unmap_ctx(jrdev
, edesc
, req
, ctx
->ctx_len
,
1530 } else if (*next_buflen
) {
1531 state
->update
= ahash_update_no_ctx
;
1532 state
->finup
= ahash_finup_no_ctx
;
1533 state
->final
= ahash_final_no_ctx
;
1534 scatterwalk_map_and_copy(next_buf
, req
->src
, 0,
1538 print_hex_dump(KERN_ERR
, "next buf@"__stringify(__LINE__
)": ",
1539 DUMP_PREFIX_ADDRESS
, 16, 4, next_buf
,
/* finup before any update is just a one-shot digest */
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
1551 static int ahash_init(struct ahash_request
*req
)
1553 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1555 state
->update
= ahash_update_first
;
1556 state
->finup
= ahash_finup_first
;
1557 state
->final
= ahash_final_no_ctx
;
1559 state
->current_buf
= 0;
1561 state
->buflen_0
= 0;
1562 state
->buflen_1
= 0;
1567 static int ahash_update(struct ahash_request
*req
)
1569 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1571 return state
->update(req
);
1574 static int ahash_finup(struct ahash_request
*req
)
1576 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1578 return state
->finup(req
);
1581 static int ahash_final(struct ahash_request
*req
)
1583 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1585 return state
->final(req
);
1588 static int ahash_export(struct ahash_request
*req
, void *out
)
1590 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1591 struct caam_export_state
*export
= out
;
1595 if (state
->current_buf
) {
1597 len
= state
->buflen_1
;
1600 len
= state
->buflen_0
;
1603 memcpy(export
->buf
, buf
, len
);
1604 memcpy(export
->caam_ctx
, state
->caam_ctx
, sizeof(export
->caam_ctx
));
1605 export
->buflen
= len
;
1606 export
->update
= state
->update
;
1607 export
->final
= state
->final
;
1608 export
->finup
= state
->finup
;
1613 static int ahash_import(struct ahash_request
*req
, const void *in
)
1615 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1616 const struct caam_export_state
*export
= in
;
1618 memset(state
, 0, sizeof(*state
));
1619 memcpy(state
->buf_0
, export
->buf
, export
->buflen
);
1620 memcpy(state
->caam_ctx
, export
->caam_ctx
, sizeof(state
->caam_ctx
));
1621 state
->buflen_0
= export
->buflen
;
1622 state
->update
= export
->update
;
1623 state
->final
= export
->final
;
1624 state
->finup
= export
->finup
;
1629 struct caam_hash_template
{
1630 char name
[CRYPTO_MAX_ALG_NAME
];
1631 char driver_name
[CRYPTO_MAX_ALG_NAME
];
1632 char hmac_name
[CRYPTO_MAX_ALG_NAME
];
1633 char hmac_driver_name
[CRYPTO_MAX_ALG_NAME
];
1634 unsigned int blocksize
;
1635 struct ahash_alg template_ahash
;
1640 /* ahash descriptors */
1641 static struct caam_hash_template driver_hash
[] = {
1644 .driver_name
= "sha1-caam",
1645 .hmac_name
= "hmac(sha1)",
1646 .hmac_driver_name
= "hmac-sha1-caam",
1647 .blocksize
= SHA1_BLOCK_SIZE
,
1650 .update
= ahash_update
,
1651 .final
= ahash_final
,
1652 .finup
= ahash_finup
,
1653 .digest
= ahash_digest
,
1654 .export
= ahash_export
,
1655 .import
= ahash_import
,
1656 .setkey
= ahash_setkey
,
1658 .digestsize
= SHA1_DIGEST_SIZE
,
1659 .statesize
= sizeof(struct caam_export_state
),
1662 .alg_type
= OP_ALG_ALGSEL_SHA1
,
1663 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
1666 .driver_name
= "sha224-caam",
1667 .hmac_name
= "hmac(sha224)",
1668 .hmac_driver_name
= "hmac-sha224-caam",
1669 .blocksize
= SHA224_BLOCK_SIZE
,
1672 .update
= ahash_update
,
1673 .final
= ahash_final
,
1674 .finup
= ahash_finup
,
1675 .digest
= ahash_digest
,
1676 .export
= ahash_export
,
1677 .import
= ahash_import
,
1678 .setkey
= ahash_setkey
,
1680 .digestsize
= SHA224_DIGEST_SIZE
,
1681 .statesize
= sizeof(struct caam_export_state
),
1684 .alg_type
= OP_ALG_ALGSEL_SHA224
,
1685 .alg_op
= OP_ALG_ALGSEL_SHA224
| OP_ALG_AAI_HMAC
,
1688 .driver_name
= "sha256-caam",
1689 .hmac_name
= "hmac(sha256)",
1690 .hmac_driver_name
= "hmac-sha256-caam",
1691 .blocksize
= SHA256_BLOCK_SIZE
,
1694 .update
= ahash_update
,
1695 .final
= ahash_final
,
1696 .finup
= ahash_finup
,
1697 .digest
= ahash_digest
,
1698 .export
= ahash_export
,
1699 .import
= ahash_import
,
1700 .setkey
= ahash_setkey
,
1702 .digestsize
= SHA256_DIGEST_SIZE
,
1703 .statesize
= sizeof(struct caam_export_state
),
1706 .alg_type
= OP_ALG_ALGSEL_SHA256
,
1707 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
1710 .driver_name
= "sha384-caam",
1711 .hmac_name
= "hmac(sha384)",
1712 .hmac_driver_name
= "hmac-sha384-caam",
1713 .blocksize
= SHA384_BLOCK_SIZE
,
1716 .update
= ahash_update
,
1717 .final
= ahash_final
,
1718 .finup
= ahash_finup
,
1719 .digest
= ahash_digest
,
1720 .export
= ahash_export
,
1721 .import
= ahash_import
,
1722 .setkey
= ahash_setkey
,
1724 .digestsize
= SHA384_DIGEST_SIZE
,
1725 .statesize
= sizeof(struct caam_export_state
),
1728 .alg_type
= OP_ALG_ALGSEL_SHA384
,
1729 .alg_op
= OP_ALG_ALGSEL_SHA384
| OP_ALG_AAI_HMAC
,
1732 .driver_name
= "sha512-caam",
1733 .hmac_name
= "hmac(sha512)",
1734 .hmac_driver_name
= "hmac-sha512-caam",
1735 .blocksize
= SHA512_BLOCK_SIZE
,
1738 .update
= ahash_update
,
1739 .final
= ahash_final
,
1740 .finup
= ahash_finup
,
1741 .digest
= ahash_digest
,
1742 .export
= ahash_export
,
1743 .import
= ahash_import
,
1744 .setkey
= ahash_setkey
,
1746 .digestsize
= SHA512_DIGEST_SIZE
,
1747 .statesize
= sizeof(struct caam_export_state
),
1750 .alg_type
= OP_ALG_ALGSEL_SHA512
,
1751 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
1754 .driver_name
= "md5-caam",
1755 .hmac_name
= "hmac(md5)",
1756 .hmac_driver_name
= "hmac-md5-caam",
1757 .blocksize
= MD5_BLOCK_WORDS
* 4,
1760 .update
= ahash_update
,
1761 .final
= ahash_final
,
1762 .finup
= ahash_finup
,
1763 .digest
= ahash_digest
,
1764 .export
= ahash_export
,
1765 .import
= ahash_import
,
1766 .setkey
= ahash_setkey
,
1768 .digestsize
= MD5_DIGEST_SIZE
,
1769 .statesize
= sizeof(struct caam_export_state
),
1772 .alg_type
= OP_ALG_ALGSEL_MD5
,
1773 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
1777 struct caam_hash_alg
{
1778 struct list_head entry
;
1781 struct ahash_alg ahash_alg
;
1784 static int caam_hash_cra_init(struct crypto_tfm
*tfm
)
1786 struct crypto_ahash
*ahash
= __crypto_ahash_cast(tfm
);
1787 struct crypto_alg
*base
= tfm
->__crt_alg
;
1788 struct hash_alg_common
*halg
=
1789 container_of(base
, struct hash_alg_common
, base
);
1790 struct ahash_alg
*alg
=
1791 container_of(halg
, struct ahash_alg
, halg
);
1792 struct caam_hash_alg
*caam_hash
=
1793 container_of(alg
, struct caam_hash_alg
, ahash_alg
);
1794 struct caam_hash_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1795 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1796 static const u8 runninglen
[] = { HASH_MSG_LEN
+ MD5_DIGEST_SIZE
,
1797 HASH_MSG_LEN
+ SHA1_DIGEST_SIZE
,
1799 HASH_MSG_LEN
+ SHA256_DIGEST_SIZE
,
1801 HASH_MSG_LEN
+ SHA512_DIGEST_SIZE
};
1805 * Get a Job ring from Job Ring driver to ensure in-order
1806 * crypto request processing per tfm
1808 ctx
->jrdev
= caam_jr_alloc();
1809 if (IS_ERR(ctx
->jrdev
)) {
1810 pr_err("Job Ring Device allocation for transform failed\n");
1811 return PTR_ERR(ctx
->jrdev
);
1813 /* copy descriptor header template value */
1814 ctx
->alg_type
= OP_TYPE_CLASS2_ALG
| caam_hash
->alg_type
;
1815 ctx
->alg_op
= OP_TYPE_CLASS2_ALG
| caam_hash
->alg_op
;
1817 ctx
->ctx_len
= runninglen
[(ctx
->alg_op
& OP_ALG_ALGSEL_SUBMASK
) >>
1818 OP_ALG_ALGSEL_SHIFT
];
1820 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
1821 sizeof(struct caam_hash_state
));
1823 ret
= ahash_set_sh_desc(ahash
);
1828 static void caam_hash_cra_exit(struct crypto_tfm
*tfm
)
1830 struct caam_hash_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1832 if (ctx
->sh_desc_update_dma
&&
1833 !dma_mapping_error(ctx
->jrdev
, ctx
->sh_desc_update_dma
))
1834 dma_unmap_single(ctx
->jrdev
, ctx
->sh_desc_update_dma
,
1835 desc_bytes(ctx
->sh_desc_update
),
1837 if (ctx
->sh_desc_update_first_dma
&&
1838 !dma_mapping_error(ctx
->jrdev
, ctx
->sh_desc_update_first_dma
))
1839 dma_unmap_single(ctx
->jrdev
, ctx
->sh_desc_update_first_dma
,
1840 desc_bytes(ctx
->sh_desc_update_first
),
1842 if (ctx
->sh_desc_fin_dma
&&
1843 !dma_mapping_error(ctx
->jrdev
, ctx
->sh_desc_fin_dma
))
1844 dma_unmap_single(ctx
->jrdev
, ctx
->sh_desc_fin_dma
,
1845 desc_bytes(ctx
->sh_desc_fin
), DMA_TO_DEVICE
);
1846 if (ctx
->sh_desc_digest_dma
&&
1847 !dma_mapping_error(ctx
->jrdev
, ctx
->sh_desc_digest_dma
))
1848 dma_unmap_single(ctx
->jrdev
, ctx
->sh_desc_digest_dma
,
1849 desc_bytes(ctx
->sh_desc_digest
),
1851 if (ctx
->sh_desc_finup_dma
&&
1852 !dma_mapping_error(ctx
->jrdev
, ctx
->sh_desc_finup_dma
))
1853 dma_unmap_single(ctx
->jrdev
, ctx
->sh_desc_finup_dma
,
1854 desc_bytes(ctx
->sh_desc_finup
), DMA_TO_DEVICE
);
1856 caam_jr_free(ctx
->jrdev
);
1859 static void __exit
caam_algapi_hash_exit(void)
1861 struct caam_hash_alg
*t_alg
, *n
;
1863 if (!hash_list
.next
)
1866 list_for_each_entry_safe(t_alg
, n
, &hash_list
, entry
) {
1867 crypto_unregister_ahash(&t_alg
->ahash_alg
);
1868 list_del(&t_alg
->entry
);
1873 static struct caam_hash_alg
*
1874 caam_hash_alloc(struct caam_hash_template
*template,
1877 struct caam_hash_alg
*t_alg
;
1878 struct ahash_alg
*halg
;
1879 struct crypto_alg
*alg
;
1881 t_alg
= kzalloc(sizeof(*t_alg
), GFP_KERNEL
);
1883 pr_err("failed to allocate t_alg\n");
1884 return ERR_PTR(-ENOMEM
);
1887 t_alg
->ahash_alg
= template->template_ahash
;
1888 halg
= &t_alg
->ahash_alg
;
1889 alg
= &halg
->halg
.base
;
1892 snprintf(alg
->cra_name
, CRYPTO_MAX_ALG_NAME
, "%s",
1893 template->hmac_name
);
1894 snprintf(alg
->cra_driver_name
, CRYPTO_MAX_ALG_NAME
, "%s",
1895 template->hmac_driver_name
);
1897 snprintf(alg
->cra_name
, CRYPTO_MAX_ALG_NAME
, "%s",
1899 snprintf(alg
->cra_driver_name
, CRYPTO_MAX_ALG_NAME
, "%s",
1900 template->driver_name
);
1901 t_alg
->ahash_alg
.setkey
= NULL
;
1903 alg
->cra_module
= THIS_MODULE
;
1904 alg
->cra_init
= caam_hash_cra_init
;
1905 alg
->cra_exit
= caam_hash_cra_exit
;
1906 alg
->cra_ctxsize
= sizeof(struct caam_hash_ctx
);
1907 alg
->cra_priority
= CAAM_CRA_PRIORITY
;
1908 alg
->cra_blocksize
= template->blocksize
;
1909 alg
->cra_alignmask
= 0;
1910 alg
->cra_flags
= CRYPTO_ALG_ASYNC
| CRYPTO_ALG_TYPE_AHASH
;
1911 alg
->cra_type
= &crypto_ahash_type
;
1913 t_alg
->alg_type
= template->alg_type
;
1914 t_alg
->alg_op
= template->alg_op
;
1919 static int __init
caam_algapi_hash_init(void)
1921 struct device_node
*dev_node
;
1922 struct platform_device
*pdev
;
1923 struct device
*ctrldev
;
1925 struct caam_drv_private
*priv
;
1926 unsigned int md_limit
= SHA512_DIGEST_SIZE
;
1927 u32 cha_inst
, cha_vid
;
1929 dev_node
= of_find_compatible_node(NULL
, NULL
, "fsl,sec-v4.0");
1931 dev_node
= of_find_compatible_node(NULL
, NULL
, "fsl,sec4.0");
1936 pdev
= of_find_device_by_node(dev_node
);
1938 of_node_put(dev_node
);
1942 ctrldev
= &pdev
->dev
;
1943 priv
= dev_get_drvdata(ctrldev
);
1944 of_node_put(dev_node
);
1947 * If priv is NULL, it's probably because the caam driver wasn't
1948 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
1954 * Register crypto algorithms the device supports. First, identify
1955 * presence and attributes of MD block.
1957 cha_vid
= rd_reg32(&priv
->ctrl
->perfmon
.cha_id_ls
);
1958 cha_inst
= rd_reg32(&priv
->ctrl
->perfmon
.cha_num_ls
);
1961 * Skip registration of any hashing algorithms if MD block
1964 if (!((cha_inst
& CHA_ID_LS_MD_MASK
) >> CHA_ID_LS_MD_SHIFT
))
1967 /* Limit digest size based on LP256 */
1968 if ((cha_vid
& CHA_ID_LS_MD_MASK
) == CHA_ID_LS_MD_LP256
)
1969 md_limit
= SHA256_DIGEST_SIZE
;
1971 INIT_LIST_HEAD(&hash_list
);
1973 /* register crypto algorithms the device supports */
1974 for (i
= 0; i
< ARRAY_SIZE(driver_hash
); i
++) {
1975 struct caam_hash_alg
*t_alg
;
1976 struct caam_hash_template
*alg
= driver_hash
+ i
;
1978 /* If MD size is not supported by device, skip registration */
1979 if (alg
->template_ahash
.halg
.digestsize
> md_limit
)
1982 /* register hmac version */
1983 t_alg
= caam_hash_alloc(alg
, true);
1984 if (IS_ERR(t_alg
)) {
1985 err
= PTR_ERR(t_alg
);
1986 pr_warn("%s alg allocation failed\n", alg
->driver_name
);
1990 err
= crypto_register_ahash(&t_alg
->ahash_alg
);
1992 pr_warn("%s alg registration failed: %d\n",
1993 t_alg
->ahash_alg
.halg
.base
.cra_driver_name
,
1997 list_add_tail(&t_alg
->entry
, &hash_list
);
1999 /* register unkeyed version */
2000 t_alg
= caam_hash_alloc(alg
, false);
2001 if (IS_ERR(t_alg
)) {
2002 err
= PTR_ERR(t_alg
);
2003 pr_warn("%s alg allocation failed\n", alg
->driver_name
);
2007 err
= crypto_register_ahash(&t_alg
->ahash_alg
);
2009 pr_warn("%s alg registration failed: %d\n",
2010 t_alg
->ahash_alg
.halg
.base
.cra_driver_name
,
2014 list_add_tail(&t_alg
->entry
, &hash_list
);
2020 module_init(caam_algapi_hash_init
);
2021 module_exit(caam_algapi_hash_exit
);
2023 MODULE_LICENSE("GPL");
2024 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
2025 MODULE_AUTHOR("Freescale Semiconductor - NMG");