crypto: caam - fix DMA direction mismatch in ahash_done_ctx_dst
drivers/crypto/caam/caamhash.c
1 /*
2 * caam - Freescale FSL CAAM support for ahash functions of crypto API
3 *
4 * Copyright 2011 Freescale Semiconductor, Inc.
5 *
6 * Based on caamalg.c crypto API driver.
7 *
8 * relationship of digest job descriptor or first job descriptor after init to
9 * shared descriptors:
10 *
11 * --------------- ---------------
12 * | JobDesc #1 |-------------------->| ShareDesc |
13 * | *(packet 1) | | (hashKey) |
14 * --------------- | (operation) |
15 * ---------------
16 *
17 * relationship of subsequent job descriptors to shared descriptors:
18 *
19 * --------------- ---------------
20 * | JobDesc #2 |-------------------->| ShareDesc |
21 * | *(packet 2) | |------------->| (hashKey) |
22 * --------------- | |-------->| (operation) |
23 * . | | | (load ctx2) |
24 * . | | ---------------
25 * --------------- | |
26 * | JobDesc #3 |------| |
27 * | *(packet 3) | |
28 * --------------- |
29 * . |
30 * . |
31 * --------------- |
32 * | JobDesc #4 |------------
33 * | *(packet 4) |
34 * ---------------
35 *
36 * The SharedDesc never changes for a connection unless rekeyed, but
37 * each packet will likely be in a different place. So all we need
38 * to know to process the packet is where the input is, where the
39 * output goes, and what context we want to process with. Context is
40 * in the SharedDesc, packet references in the JobDesc.
41 *
42 * So, a job desc looks like:
43 *
44 * ---------------------
45 * | Header |
46 * | ShareDesc Pointer |
47 * | SEQ_OUT_PTR |
48 * | (output buffer) |
49 * | (output length) |
50 * | SEQ_IN_PTR |
51 * | (input buffer) |
52 * | (input length) |
53 * ---------------------
54 */
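/*
 * A minimal sketch, for illustration only, of how such a job descriptor
 * is built with the desc_constr.h helpers used throughout this file
 * (variable names here are placeholders, mirroring the update path below):
 *
 *	u32 *desc = edesc->hw_desc;
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, src_len, LDST_SGF);
 *	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 */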
55
56 #include "compat.h"
57
58 #include "regs.h"
59 #include "intern.h"
60 #include "desc_constr.h"
61 #include "jr.h"
62 #include "error.h"
63 #include "sg_sw_sec4.h"
64 #include "key_gen.h"
65
66 #define CAAM_CRA_PRIORITY 3000
67
68 /* max hash key is max split key size */
69 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
70
71 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
72 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
73
74 /* length of descriptors text */
75 #define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
76 #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
77 #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
78 #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
79 #define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
80 #define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
81
82 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
83 CAAM_MAX_HASH_KEY_SIZE)
84 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
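/*
 * Example, assuming CAAM_CMD_SZ == 4: DESC_AHASH_FINAL_LEN is
 * (4 + 5) * 4 = 36 bytes, so DESC_HASH_MAX_USED_BYTES is 36 + 128 = 164
 * and DESC_HASH_MAX_USED_LEN is 41 words.
 */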
85
86 /* caam context sizes for hashes: running digest + 8 */
87 #define HASH_MSG_LEN 8
88 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
89
90 #ifdef DEBUG
91 /* for print_hex_dumps with line references */
92 #define debug(format, arg...) printk(format, arg)
93 #else
94 #define debug(format, arg...)
95 #endif
96
97
98 static struct list_head hash_list;
99
100 /* ahash per-session context */
101 struct caam_hash_ctx {
102 struct device *jrdev;
103 u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
104 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
105 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
106 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
107 u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
108 dma_addr_t sh_desc_update_dma;
109 dma_addr_t sh_desc_update_first_dma;
110 dma_addr_t sh_desc_fin_dma;
111 dma_addr_t sh_desc_digest_dma;
112 dma_addr_t sh_desc_finup_dma;
113 u32 alg_type;
114 u32 alg_op;
115 u8 key[CAAM_MAX_HASH_KEY_SIZE];
116 dma_addr_t key_dma;
117 int ctx_len;
118 unsigned int split_key_len;
119 unsigned int split_key_pad_len;
120 };
121
122 /* ahash state */
123 struct caam_hash_state {
124 dma_addr_t buf_dma;
125 dma_addr_t ctx_dma;
126 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
127 int buflen_0;
128 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
129 int buflen_1;
130 u8 caam_ctx[MAX_CTX_LEN];
131 int (*update)(struct ahash_request *req);
132 int (*final)(struct ahash_request *req);
133 int (*finup)(struct ahash_request *req);
134 int current_buf;
135 };
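/*
 * buf_0/buf_1 above form a ping-pong pair: current_buf selects the buffer
 * holding bytes carried over from the previous request, while the other
 * buffer collects the sub-block tail of the current request; current_buf
 * is flipped whenever the spare buffer gets filled.
 */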
136
137 /* Common job descriptor seq in/out ptr routines */
138
139 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
140 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
141 struct caam_hash_state *state,
142 int ctx_len)
143 {
144 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
145 ctx_len, DMA_FROM_DEVICE);
146 if (dma_mapping_error(jrdev, state->ctx_dma)) {
147 dev_err(jrdev, "unable to map ctx\n");
148 return -ENOMEM;
149 }
150
151 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
152
153 return 0;
154 }
155
156 /* Map req->result, and append seq_out_ptr command that points to it */
157 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
158 u8 *result, int digestsize)
159 {
160 dma_addr_t dst_dma;
161
162 dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
163 append_seq_out_ptr(desc, dst_dma, digestsize, 0);
164
165 return dst_dma;
166 }
167
168 /* Map current buffer in state and put it in link table */
169 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
170 struct sec4_sg_entry *sec4_sg,
171 u8 *buf, int buflen)
172 {
173 dma_addr_t buf_dma;
174
175 buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
176 dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
177
178 return buf_dma;
179 }
180
181 /* Map req->src and put it in link table */
182 static inline void src_map_to_sec4_sg(struct device *jrdev,
183 struct scatterlist *src, int src_nents,
184 struct sec4_sg_entry *sec4_sg,
185 bool chained)
186 {
187 dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
188 sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
189 }
190
191 /*
192  * Unmap the previously used buffer mapping, if any, and only put the
193  * buffer in the link table if it currently contains data.
194 */
195 static inline dma_addr_t
196 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
197 u8 *buf, dma_addr_t buf_dma, int buflen,
198 int last_buflen)
199 {
200 if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
201 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
202 if (buflen)
203 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
204 else
205 buf_dma = 0;
206
207 return buf_dma;
208 }
209
210 /* Map state->caam_ctx, and add it to link table */
211 static inline int ctx_map_to_sec4_sg(struct device *jrdev,
212 struct caam_hash_state *state, int ctx_len,
213 struct sec4_sg_entry *sec4_sg, u32 flag)
214 {
215 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
216 if (dma_mapping_error(jrdev, state->ctx_dma)) {
217 dev_err(jrdev, "unable to map ctx\n");
218 return -ENOMEM;
219 }
220
221 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
222
223 return 0;
224 }
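/*
 * Note: callers pick the DMA direction for the context mapping above to
 * match its use in the job: DMA_TO_DEVICE when CAAM only reads it
 * (final/finup), DMA_BIDIRECTIONAL when it is read and then updated in
 * place (update).
 */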
225
226 /* Common shared descriptor commands */
227 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
228 {
229 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
230 ctx->split_key_len, CLASS_2 |
231 KEY_DEST_MDHA_SPLIT | KEY_ENC);
232 }
233
234 /* Append key if it has been set */
235 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
236 {
237 u32 *key_jump_cmd;
238
239 init_sh_desc(desc, HDR_SHARE_SERIAL);
240
241 if (ctx->split_key_len) {
242 /* Skip if already shared */
243 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
244 JUMP_COND_SHRD);
245
246 append_key_ahash(desc, ctx);
247
248 set_jump_tgt_here(desc, key_jump_cmd);
249 }
250
251 /* Propagate errors from shared to job descriptor */
252 append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
253 }
254
255 /*
256 * For ahash read data from seqin following state->caam_ctx,
257 * and write resulting class2 context to seqout, which may be state->caam_ctx
258 * or req->result
259 */
260 static inline void ahash_append_load_str(u32 *desc, int digestsize)
261 {
262 /* Calculate remaining bytes to read */
263 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
264
265 /* Read remaining bytes */
266 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
267 FIFOLD_TYPE_MSG | KEY_VLF);
268
269 /* Store class2 context bytes */
270 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
271 LDST_SRCDST_BYTE_CONTEXT);
272 }
273
274 /*
275 * For ahash update, final and finup, import context, read and write to seqout
276 */
277 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
278 int digestsize,
279 struct caam_hash_ctx *ctx)
280 {
281 init_sh_desc_key_ahash(desc, ctx);
282
283 /* Import context from software */
284 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
285 LDST_CLASS_2_CCB | ctx->ctx_len);
286
287 /* Class 2 operation */
288 append_operation(desc, op | state | OP_ALG_ENCRYPT);
289
290 /*
291 * Load from buf and/or src and write to req->result or state->context
292 */
293 ahash_append_load_str(desc, digestsize);
294 }
295
296 /* For ahash first (init) and digest, read data and write to seqout */
297 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
298 int digestsize, struct caam_hash_ctx *ctx)
299 {
300 init_sh_desc_key_ahash(desc, ctx);
301
302 /* Class 2 operation */
303 append_operation(desc, op | state | OP_ALG_ENCRYPT);
304
305 /*
306 * Load from buf and/or src and write to req->result or state->context
307 */
308 ahash_append_load_str(desc, digestsize);
309 }
310
311 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
312 {
313 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
314 int digestsize = crypto_ahash_digestsize(ahash);
315 struct device *jrdev = ctx->jrdev;
316 u32 have_key = 0;
317 u32 *desc;
318
319 if (ctx->split_key_len)
320 have_key = OP_ALG_AAI_HMAC_PRECOMP;
321
322 /* ahash_update shared descriptor */
323 desc = ctx->sh_desc_update;
324
325 init_sh_desc(desc, HDR_SHARE_SERIAL);
326
327 /* Import context from software */
328 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
329 LDST_CLASS_2_CCB | ctx->ctx_len);
330
331 /* Class 2 operation */
332 append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
333 OP_ALG_ENCRYPT);
334
335 /* Load data and write to result or context */
336 ahash_append_load_str(desc, ctx->ctx_len);
337
338 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
339 DMA_TO_DEVICE);
340 if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
341 dev_err(jrdev, "unable to map shared descriptor\n");
342 return -ENOMEM;
343 }
344 #ifdef DEBUG
345 print_hex_dump(KERN_ERR,
346 "ahash update shdesc@"__stringify(__LINE__)": ",
347 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
348 #endif
349
350 /* ahash_update_first shared descriptor */
351 desc = ctx->sh_desc_update_first;
352
353 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
354 ctx->ctx_len, ctx);
355
356 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
357 desc_bytes(desc),
358 DMA_TO_DEVICE);
359 if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
360 dev_err(jrdev, "unable to map shared descriptor\n");
361 return -ENOMEM;
362 }
363 #ifdef DEBUG
364 print_hex_dump(KERN_ERR,
365 "ahash update first shdesc@"__stringify(__LINE__)": ",
366 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
367 #endif
368
369 /* ahash_final shared descriptor */
370 desc = ctx->sh_desc_fin;
371
372 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
373 OP_ALG_AS_FINALIZE, digestsize, ctx);
374
375 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
376 DMA_TO_DEVICE);
377 if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
378 dev_err(jrdev, "unable to map shared descriptor\n");
379 return -ENOMEM;
380 }
381 #ifdef DEBUG
382 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
383 DUMP_PREFIX_ADDRESS, 16, 4, desc,
384 desc_bytes(desc), 1);
385 #endif
386
387 /* ahash_finup shared descriptor */
388 desc = ctx->sh_desc_finup;
389
390 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
391 OP_ALG_AS_FINALIZE, digestsize, ctx);
392
393 ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
394 DMA_TO_DEVICE);
395 if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
396 dev_err(jrdev, "unable to map shared descriptor\n");
397 return -ENOMEM;
398 }
399 #ifdef DEBUG
400 print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
401 DUMP_PREFIX_ADDRESS, 16, 4, desc,
402 desc_bytes(desc), 1);
403 #endif
404
405 /* ahash_digest shared descriptor */
406 desc = ctx->sh_desc_digest;
407
408 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
409 digestsize, ctx);
410
411 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
412 desc_bytes(desc),
413 DMA_TO_DEVICE);
414 if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
415 dev_err(jrdev, "unable to map shared descriptor\n");
416 return -ENOMEM;
417 }
418 #ifdef DEBUG
419 print_hex_dump(KERN_ERR,
420 "ahash digest shdesc@"__stringify(__LINE__)": ",
421 DUMP_PREFIX_ADDRESS, 16, 4, desc,
422 desc_bytes(desc), 1);
423 #endif
424
425 return 0;
426 }
427
428 static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
429 u32 keylen)
430 {
431 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
432 ctx->split_key_pad_len, key_in, keylen,
433 ctx->alg_op);
434 }
435
436 /* Digest the key down to digestsize bytes if it is longer than the block size */
437 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
438 u32 *keylen, u8 *key_out, u32 digestsize)
439 {
440 struct device *jrdev = ctx->jrdev;
441 u32 *desc;
442 struct split_key_result result;
443 dma_addr_t src_dma, dst_dma;
444 int ret = 0;
445
446 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
447 if (!desc) {
448 dev_err(jrdev, "unable to allocate key input memory\n");
449 return -ENOMEM;
450 }
451
452 init_job_desc(desc, 0);
453
454 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
455 DMA_TO_DEVICE);
456 if (dma_mapping_error(jrdev, src_dma)) {
457 dev_err(jrdev, "unable to map key input memory\n");
458 kfree(desc);
459 return -ENOMEM;
460 }
461 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
462 DMA_FROM_DEVICE);
463 if (dma_mapping_error(jrdev, dst_dma)) {
464 dev_err(jrdev, "unable to map key output memory\n");
465 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
466 kfree(desc);
467 return -ENOMEM;
468 }
469
470 /* Job descriptor to perform unkeyed hash on key_in */
471 append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
472 OP_ALG_AS_INITFINAL);
473 append_seq_in_ptr(desc, src_dma, *keylen, 0);
474 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
475 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
476 append_seq_out_ptr(desc, dst_dma, digestsize, 0);
477 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
478 LDST_SRCDST_BYTE_CONTEXT);
479
480 #ifdef DEBUG
481 print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
482 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
483 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
484 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
485 #endif
486
487 result.err = 0;
488 init_completion(&result.completion);
489
490 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
491 if (!ret) {
492 /* in progress */
493 wait_for_completion(&result.completion);
494 ret = result.err;
495 #ifdef DEBUG
496 print_hex_dump(KERN_ERR,
497 "digested key@"__stringify(__LINE__)": ",
498 DUMP_PREFIX_ADDRESS, 16, 4, key_out,
499 digestsize, 1);
500 #endif
501 }
502 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
503 dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
504
505 *keylen = digestsize;
506
507 kfree(desc);
508
509 return ret;
510 }
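/*
 * This mirrors the usual HMAC rule (RFC 2104): a key longer than the
 * block size is first hashed down to digestsize bytes before the split
 * key is generated from it in ahash_setkey() below.
 */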
511
512 static int ahash_setkey(struct crypto_ahash *ahash,
513 const u8 *key, unsigned int keylen)
514 {
515 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
516 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
517 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
518 struct device *jrdev = ctx->jrdev;
519 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
520 int digestsize = crypto_ahash_digestsize(ahash);
521 int ret = 0;
522 u8 *hashed_key = NULL;
523
524 #ifdef DEBUG
525 printk(KERN_ERR "keylen %d\n", keylen);
526 #endif
527
528 if (keylen > blocksize) {
529 hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
530 GFP_DMA);
531 if (!hashed_key)
532 return -ENOMEM;
533 ret = hash_digest_key(ctx, key, &keylen, hashed_key,
534 digestsize);
535 if (ret)
536 goto badkey;
537 key = hashed_key;
538 }
539
540 /* Pick class 2 key length from algorithm submask */
541 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
542 OP_ALG_ALGSEL_SHIFT] * 2;
543 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
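/*
 * Example: for hmac(sha1) mdpadlen[] gives 20, so split_key_len is 40
 * and split_key_pad_len is ALIGN(40, 16) = 48; for hmac(sha256) the
 * entry is 32, giving 64 for both lengths.
 */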
544
545 #ifdef DEBUG
546 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
547 ctx->split_key_len, ctx->split_key_pad_len);
548 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
549 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
550 #endif
551
552 ret = gen_split_hash_key(ctx, key, keylen);
553 if (ret)
554 goto badkey;
555
556 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
557 DMA_TO_DEVICE);
558 if (dma_mapping_error(jrdev, ctx->key_dma)) {
559 dev_err(jrdev, "unable to map key i/o memory\n");
560 ret = -ENOMEM;
561 goto map_err;
562 }
563 #ifdef DEBUG
564 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
565 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
566 ctx->split_key_pad_len, 1);
567 #endif
568
569 ret = ahash_set_sh_desc(ahash);
570 if (ret) {
571 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
572 DMA_TO_DEVICE);
573 }
574
575 map_err:
576 kfree(hashed_key);
577 return ret;
578 badkey:
579 kfree(hashed_key);
580 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
581 return -EINVAL;
582 }
583
584 /*
585 * ahash_edesc - s/w-extended ahash descriptor
586 * @dst_dma: physical mapped address of req->result
587 * @sec4_sg_dma: physical mapped address of h/w link table
588 * @chained: if source is chained
589 * @src_nents: number of segments in input scatterlist
590 * @sec4_sg_bytes: length of dma mapped sec4_sg space
591 * @sec4_sg: pointer to h/w link table
592 * @hw_desc: the h/w job descriptor followed by any referenced link tables
593 */
594 struct ahash_edesc {
595 dma_addr_t dst_dma;
596 dma_addr_t sec4_sg_dma;
597 bool chained;
598 int src_nents;
599 int sec4_sg_bytes;
600 struct sec4_sg_entry *sec4_sg;
601 u32 hw_desc[0];
602 };
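/*
 * The extended descriptor is carved out of a single kmalloc() block:
 * [struct ahash_edesc][hw_desc, DESC_JOB_IO_LEN bytes][sec4_sg table],
 * which is why sec4_sg is derived from the edesc pointer in the functions
 * below and the whole thing is released with one kfree().
 */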
603
604 static inline void ahash_unmap(struct device *dev,
605 struct ahash_edesc *edesc,
606 struct ahash_request *req, int dst_len)
607 {
608 if (edesc->src_nents)
609 dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
610 DMA_TO_DEVICE, edesc->chained);
611 if (edesc->dst_dma)
612 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
613
614 if (edesc->sec4_sg_bytes)
615 dma_unmap_single(dev, edesc->sec4_sg_dma,
616 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
617 }
618
619 static inline void ahash_unmap_ctx(struct device *dev,
620 struct ahash_edesc *edesc,
621 struct ahash_request *req, int dst_len, u32 flag)
622 {
623 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
624 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
625 struct caam_hash_state *state = ahash_request_ctx(req);
626
627 if (state->ctx_dma)
628 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
629 ahash_unmap(dev, edesc, req, dst_len);
630 }
631
632 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
633 void *context)
634 {
635 struct ahash_request *req = context;
636 struct ahash_edesc *edesc;
637 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
638 int digestsize = crypto_ahash_digestsize(ahash);
639 #ifdef DEBUG
640 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
641 struct caam_hash_state *state = ahash_request_ctx(req);
642
643 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
644 #endif
645
646 edesc = (struct ahash_edesc *)((char *)desc -
647 offsetof(struct ahash_edesc, hw_desc));
648 if (err)
649 caam_jr_strstatus(jrdev, err);
650
651 ahash_unmap(jrdev, edesc, req, digestsize);
652 kfree(edesc);
653
654 #ifdef DEBUG
655 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
656 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
657 ctx->ctx_len, 1);
658 if (req->result)
659 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
660 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
661 digestsize, 1);
662 #endif
663
664 req->base.complete(&req->base, err);
665 }
666
667 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
668 void *context)
669 {
670 struct ahash_request *req = context;
671 struct ahash_edesc *edesc;
672 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
673 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
674 #ifdef DEBUG
675 struct caam_hash_state *state = ahash_request_ctx(req);
676 int digestsize = crypto_ahash_digestsize(ahash);
677
678 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
679 #endif
680
681 edesc = (struct ahash_edesc *)((char *)desc -
682 offsetof(struct ahash_edesc, hw_desc));
683 if (err)
684 caam_jr_strstatus(jrdev, err);
685
686 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
687 kfree(edesc);
688
689 #ifdef DEBUG
690 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
691 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
692 ctx->ctx_len, 1);
693 if (req->result)
694 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
695 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
696 digestsize, 1);
697 #endif
698
699 req->base.complete(&req->base, err);
700 }
701
702 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
703 void *context)
704 {
705 struct ahash_request *req = context;
706 struct ahash_edesc *edesc;
707 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
708 int digestsize = crypto_ahash_digestsize(ahash);
709 #ifdef DEBUG
710 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
711 struct caam_hash_state *state = ahash_request_ctx(req);
712
713 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
714 #endif
715
716 edesc = (struct ahash_edesc *)((char *)desc -
717 offsetof(struct ahash_edesc, hw_desc));
718 if (err)
719 caam_jr_strstatus(jrdev, err);
720
721 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
722 kfree(edesc);
723
724 #ifdef DEBUG
725 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
726 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
727 ctx->ctx_len, 1);
728 if (req->result)
729 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
730 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
731 digestsize, 1);
732 #endif
733
734 req->base.complete(&req->base, err);
735 }
736
737 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
738 void *context)
739 {
740 struct ahash_request *req = context;
741 struct ahash_edesc *edesc;
742 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
743 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
744 #ifdef DEBUG
745 struct caam_hash_state *state = ahash_request_ctx(req);
746 int digestsize = crypto_ahash_digestsize(ahash);
747
748 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
749 #endif
750
751 edesc = (struct ahash_edesc *)((char *)desc -
752 offsetof(struct ahash_edesc, hw_desc));
753 if (err)
754 caam_jr_strstatus(jrdev, err);
755
756 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
757 kfree(edesc);
758
759 #ifdef DEBUG
760 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
761 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
762 ctx->ctx_len, 1);
763 if (req->result)
764 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
765 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
766 digestsize, 1);
767 #endif
768
769 req->base.complete(&req->base, err);
770 }
771
772 /* submit update job descriptor */
773 static int ahash_update_ctx(struct ahash_request *req)
774 {
775 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
776 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
777 struct caam_hash_state *state = ahash_request_ctx(req);
778 struct device *jrdev = ctx->jrdev;
779 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
780 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
781 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
782 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
783 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
784 int *next_buflen = state->current_buf ? &state->buflen_0 :
785 &state->buflen_1, last_buflen;
786 int in_len = *buflen + req->nbytes, to_hash;
787 u32 *sh_desc = ctx->sh_desc_update, *desc;
788 dma_addr_t ptr = ctx->sh_desc_update_dma;
789 int src_nents, sec4_sg_bytes, sec4_sg_src_index;
790 struct ahash_edesc *edesc;
791 bool chained = false;
792 int ret = 0;
793 int sh_len;
794
795 last_buflen = *next_buflen;
796 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
797 to_hash = in_len - *next_buflen;
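/*
 * Example: with a 64-byte block size, 10 buffered bytes and a 100-byte
 * request, in_len = 110, *next_buflen = 110 & 63 = 46 and to_hash = 64,
 * i.e. one full block is hashed now and 46 bytes are carried over for
 * the next update.
 */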
798
799 if (to_hash) {
800 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
801 &chained);
802 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
803 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
804 sizeof(struct sec4_sg_entry);
805
806 /*
807 * allocate space for base edesc and hw desc commands,
808 * link tables
809 */
810 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
811 sec4_sg_bytes, GFP_DMA | flags);
812 if (!edesc) {
813 dev_err(jrdev,
814 "could not allocate extended descriptor\n");
815 return -ENOMEM;
816 }
817
818 edesc->src_nents = src_nents;
819 edesc->chained = chained;
820 edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->dst_dma = 0; /* the result goes to caam_ctx, not req->result */
821 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
822 DESC_JOB_IO_LEN;
823
824 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
825 edesc->sec4_sg, DMA_BIDIRECTIONAL);
826 if (ret) {
kfree(edesc);
827 return ret;
}
828
829 state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
830 edesc->sec4_sg + 1,
831 buf, state->buf_dma,
832 *buflen, last_buflen);
833
834 if (src_nents) {
835 src_map_to_sec4_sg(jrdev, req->src, src_nents,
836 edesc->sec4_sg + sec4_sg_src_index,
837 chained);
838 if (*next_buflen) {
839 sg_copy_part(next_buf, req->src, to_hash -
840 *buflen, req->nbytes);
841 state->current_buf = !state->current_buf;
842 }
843 } else {
844 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
845 SEC4_SG_LEN_FIN;
846 }
847
848 sh_len = desc_len(sh_desc);
849 desc = edesc->hw_desc;
850 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
851 HDR_REVERSE);
852
853 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
854 sec4_sg_bytes,
855 DMA_TO_DEVICE);
856 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
857 dev_err(jrdev, "unable to map S/G table\n");
858 return -ENOMEM;
859 }
860
861 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
862 to_hash, LDST_SGF);
863
864 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
865
866 #ifdef DEBUG
867 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
868 DUMP_PREFIX_ADDRESS, 16, 4, desc,
869 desc_bytes(desc), 1);
870 #endif
871
872 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
873 if (!ret) {
874 ret = -EINPROGRESS;
875 } else {
876 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
877 DMA_BIDIRECTIONAL);
878 kfree(edesc);
879 }
880 } else if (*next_buflen) {
881 sg_copy(buf + *buflen, req->src, req->nbytes);
882 *buflen = *next_buflen;
883 *next_buflen = last_buflen;
884 }
885 #ifdef DEBUG
886 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
887 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
888 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
889 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
890 *next_buflen, 1);
891 #endif
892
893 return ret;
894 }
895
896 static int ahash_final_ctx(struct ahash_request *req)
897 {
898 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
899 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
900 struct caam_hash_state *state = ahash_request_ctx(req);
901 struct device *jrdev = ctx->jrdev;
902 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
903 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
904 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
905 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
906 int last_buflen = state->current_buf ? state->buflen_0 :
907 state->buflen_1;
908 u32 *sh_desc = ctx->sh_desc_fin, *desc;
909 dma_addr_t ptr = ctx->sh_desc_fin_dma;
910 int sec4_sg_bytes;
911 int digestsize = crypto_ahash_digestsize(ahash);
912 struct ahash_edesc *edesc;
913 int ret = 0;
914 int sh_len;
915
916 sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
917
918 /* allocate space for base edesc and hw desc commands, link tables */
919 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
920 sec4_sg_bytes, GFP_DMA | flags);
921 if (!edesc) {
922 dev_err(jrdev, "could not allocate extended descriptor\n");
923 return -ENOMEM;
924 }
925
926 sh_len = desc_len(sh_desc);
927 desc = edesc->hw_desc;
928 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
929
930 edesc->sec4_sg_bytes = sec4_sg_bytes;
931 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
932 DESC_JOB_IO_LEN;
933 edesc->src_nents = 0;
934
935 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
936 edesc->sec4_sg, DMA_TO_DEVICE);
937 if (ret) {
kfree(edesc);
938 return ret;
}
939
940 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
941 buf, state->buf_dma, buflen,
942 last_buflen);
943 (edesc->sec4_sg + (buflen ? 1 : 0))->len |= SEC4_SG_LEN_FIN; /* index in entries, not bytes */
944
945 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
946 sec4_sg_bytes, DMA_TO_DEVICE);
947 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
948 dev_err(jrdev, "unable to map S/G table\n");
949 return -ENOMEM;
950 }
951
952 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
953 LDST_SGF);
954
955 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
956 digestsize);
957 if (dma_mapping_error(jrdev, edesc->dst_dma)) {
958 dev_err(jrdev, "unable to map dst\n");
959 return -ENOMEM;
960 }
961
962 #ifdef DEBUG
963 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
964 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
965 #endif
966
967 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
968 if (!ret) {
969 ret = -EINPROGRESS;
970 } else {
971 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
972 kfree(edesc);
973 }
974
975 return ret;
976 }
977
978 static int ahash_finup_ctx(struct ahash_request *req)
979 {
980 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
981 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
982 struct caam_hash_state *state = ahash_request_ctx(req);
983 struct device *jrdev = ctx->jrdev;
984 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
985 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
986 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
987 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
988 int last_buflen = state->current_buf ? state->buflen_0 :
989 state->buflen_1;
990 u32 *sh_desc = ctx->sh_desc_finup, *desc;
991 dma_addr_t ptr = ctx->sh_desc_finup_dma;
992 int sec4_sg_bytes, sec4_sg_src_index;
993 int src_nents;
994 int digestsize = crypto_ahash_digestsize(ahash);
995 struct ahash_edesc *edesc;
996 bool chained = false;
997 int ret = 0;
998 int sh_len;
999
1000 src_nents = __sg_count(req->src, req->nbytes, &chained);
1001 sec4_sg_src_index = 1 + (buflen ? 1 : 0);
1002 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1003 sizeof(struct sec4_sg_entry);
1004
1005 /* allocate space for base edesc and hw desc commands, link tables */
1006 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1007 sec4_sg_bytes, GFP_DMA | flags);
1008 if (!edesc) {
1009 dev_err(jrdev, "could not allocate extended descriptor\n");
1010 return -ENOMEM;
1011 }
1012
1013 sh_len = desc_len(sh_desc);
1014 desc = edesc->hw_desc;
1015 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1016
1017 edesc->src_nents = src_nents;
1018 edesc->chained = chained;
1019 edesc->sec4_sg_bytes = sec4_sg_bytes;
1020 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1021 DESC_JOB_IO_LEN;
1022
1023 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
1024 edesc->sec4_sg, DMA_TO_DEVICE);
1025 if (ret) {
kfree(edesc);
1026 return ret;
}
1027
1028 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
1029 buf, state->buf_dma, buflen,
1030 last_buflen);
1031
1032 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
1033 sec4_sg_src_index, chained);
1034
1035 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1036 sec4_sg_bytes, DMA_TO_DEVICE);
1037 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1038 dev_err(jrdev, "unable to map S/G table\n");
1039 return -ENOMEM;
1040 }
1041
1042 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
1043 buflen + req->nbytes, LDST_SGF);
1044
1045 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1046 digestsize);
1047 if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1048 dev_err(jrdev, "unable to map dst\n");
1049 return -ENOMEM;
1050 }
1051
1052 #ifdef DEBUG
1053 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1054 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1055 #endif
1056
1057 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1058 if (!ret) {
1059 ret = -EINPROGRESS;
1060 } else {
1061 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1062 kfree(edesc);
1063 }
1064
1065 return ret;
1066 }
1067
1068 static int ahash_digest(struct ahash_request *req)
1069 {
1070 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1071 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1072 struct device *jrdev = ctx->jrdev;
1073 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1074 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1075 u32 *sh_desc = ctx->sh_desc_digest, *desc;
1076 dma_addr_t ptr = ctx->sh_desc_digest_dma;
1077 int digestsize = crypto_ahash_digestsize(ahash);
1078 int src_nents, sec4_sg_bytes;
1079 dma_addr_t src_dma;
1080 struct ahash_edesc *edesc;
1081 bool chained = false;
1082 int ret = 0;
1083 u32 options;
1084 int sh_len;
1085
1086 src_nents = sg_count(req->src, req->nbytes, &chained);
1087 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
1088 chained);
1089 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1090
1091 /* allocate space for base edesc and hw desc commands, link tables */
1092 edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
1093 DESC_JOB_IO_LEN, GFP_DMA | flags);
1094 if (!edesc) {
1095 dev_err(jrdev, "could not allocate extended descriptor\n");
1096 return -ENOMEM;
1097 }
1098 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1099 DESC_JOB_IO_LEN;
edesc->sec4_sg_bytes = sec4_sg_bytes; /* read later by ahash_unmap() */
1100 edesc->src_nents = src_nents;
1101 edesc->chained = chained;
1102
1103 sh_len = desc_len(sh_desc);
1104 desc = edesc->hw_desc;
1105 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1106
1107 if (src_nents) {
1108 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
1109 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1110 sec4_sg_bytes, DMA_TO_DEVICE);
1111 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1112 dev_err(jrdev, "unable to map S/G table\n");
1113 return -ENOMEM;
1114 }
1115 src_dma = edesc->sec4_sg_dma;
1116 options = LDST_SGF;
1117 } else {
1118 src_dma = sg_dma_address(req->src);
1119 options = 0;
1120 }
1121 append_seq_in_ptr(desc, src_dma, req->nbytes, options);
1122
1123 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1124 digestsize);
1125 if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1126 dev_err(jrdev, "unable to map dst\n");
1127 return -ENOMEM;
1128 }
1129
1130 #ifdef DEBUG
1131 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1132 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1133 #endif
1134
1135 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1136 if (!ret) {
1137 ret = -EINPROGRESS;
1138 } else {
1139 ahash_unmap(jrdev, edesc, req, digestsize);
1140 kfree(edesc);
1141 }
1142
1143 return ret;
1144 }
1145
1146 /* submit ahash final if it is the first job descriptor */
1147 static int ahash_final_no_ctx(struct ahash_request *req)
1148 {
1149 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1150 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1151 struct caam_hash_state *state = ahash_request_ctx(req);
1152 struct device *jrdev = ctx->jrdev;
1153 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1154 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1155 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1156 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1157 u32 *sh_desc = ctx->sh_desc_digest, *desc;
1158 dma_addr_t ptr = ctx->sh_desc_digest_dma;
1159 int digestsize = crypto_ahash_digestsize(ahash);
1160 struct ahash_edesc *edesc;
1161 int ret = 0;
1162 int sh_len;
1163
1164 /* allocate space for base edesc and hw desc commands, link tables */
1165 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
1166 GFP_DMA | flags);
1167 if (!edesc) {
1168 dev_err(jrdev, "could not allocate extended descriptor\n");
1169 return -ENOMEM;
1170 }
1171
1172 sh_len = desc_len(sh_desc);
1173 desc = edesc->hw_desc;
1174 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1175
1176 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1177 if (dma_mapping_error(jrdev, state->buf_dma)) {
1178 dev_err(jrdev, "unable to map src\n");
1179 return -ENOMEM;
1180 }
1181
1182 append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1183
1184 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1185 digestsize);
1186 if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1187 dev_err(jrdev, "unable to map dst\n");
1188 return -ENOMEM;
1189 }
1190 edesc->src_nents = 0;
edesc->sec4_sg_bytes = 0; /* nothing for ahash_unmap() to unmap */
1191
1192 #ifdef DEBUG
1193 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1194 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1195 #endif
1196
1197 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1198 if (!ret) {
1199 ret = -EINPROGRESS;
1200 } else {
1201 ahash_unmap(jrdev, edesc, req, digestsize);
1202 kfree(edesc);
1203 }
1204
1205 return ret;
1206 }
1207
1208 /* submit ahash update if it is the first job descriptor after update */
1209 static int ahash_update_no_ctx(struct ahash_request *req)
1210 {
1211 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1212 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1213 struct caam_hash_state *state = ahash_request_ctx(req);
1214 struct device *jrdev = ctx->jrdev;
1215 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1216 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1217 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1218 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
1219 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
1220 int *next_buflen = state->current_buf ? &state->buflen_0 :
1221 &state->buflen_1;
1222 int in_len = *buflen + req->nbytes, to_hash;
1223 int sec4_sg_bytes, src_nents;
1224 struct ahash_edesc *edesc;
1225 u32 *desc, *sh_desc = ctx->sh_desc_update_first;
1226 dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1227 bool chained = false;
1228 int ret = 0;
1229 int sh_len;
1230
1231 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1232 to_hash = in_len - *next_buflen;
1233
1234 if (to_hash) {
1235 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
1236 &chained);
1237 sec4_sg_bytes = (1 + src_nents) *
1238 sizeof(struct sec4_sg_entry);
1239
1240 /*
1241 * allocate space for base edesc and hw desc commands,
1242 * link tables
1243 */
1244 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1245 sec4_sg_bytes, GFP_DMA | flags);
1246 if (!edesc) {
1247 dev_err(jrdev,
1248 "could not allocate extended descriptor\n");
1249 return -ENOMEM;
1250 }
1251
1252 edesc->src_nents = src_nents;
1253 edesc->chained = chained;
1254 edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->dst_dma = 0; /* the result goes to caam_ctx, not req->result */
1255 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1256 DESC_JOB_IO_LEN;
1257
1258 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1259 buf, *buflen);
1260 src_map_to_sec4_sg(jrdev, req->src, src_nents,
1261 edesc->sec4_sg + 1, chained);
1262 if (*next_buflen) {
1263 sg_copy_part(next_buf, req->src, to_hash - *buflen,
1264 req->nbytes);
1265 state->current_buf = !state->current_buf;
1266 }
1267
1268 sh_len = desc_len(sh_desc);
1269 desc = edesc->hw_desc;
1270 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1271 HDR_REVERSE);
1272
1273 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1274 sec4_sg_bytes,
1275 DMA_TO_DEVICE);
1276 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1277 dev_err(jrdev, "unable to map S/G table\n");
1278 return -ENOMEM;
1279 }
1280
1281 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1282
1283 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1284 if (ret)
1285 return ret;
1286
1287 #ifdef DEBUG
1288 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1289 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1290 desc_bytes(desc), 1);
1291 #endif
1292
1293 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1294 if (!ret) {
1295 ret = -EINPROGRESS;
1296 state->update = ahash_update_ctx;
1297 state->finup = ahash_finup_ctx;
1298 state->final = ahash_final_ctx;
1299 } else {
1300 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1301 DMA_FROM_DEVICE); /* matches the ctx mapping in map_seq_out_ptr_ctx() */
1302 kfree(edesc);
1303 }
1304 } else if (*next_buflen) {
1305 sg_copy(buf + *buflen, req->src, req->nbytes);
1306 *buflen = *next_buflen;
1307 *next_buflen = 0;
1308 }
1309 #ifdef DEBUG
1310 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
1311 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1312 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1313 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1314 *next_buflen, 1);
1315 #endif
1316
1317 return ret;
1318 }
1319
1320 /* submit ahash finup if it is the first job descriptor after update */
1321 static int ahash_finup_no_ctx(struct ahash_request *req)
1322 {
1323 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1324 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1325 struct caam_hash_state *state = ahash_request_ctx(req);
1326 struct device *jrdev = ctx->jrdev;
1327 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1328 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1329 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1330 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1331 int last_buflen = state->current_buf ? state->buflen_0 :
1332 state->buflen_1;
1333 u32 *sh_desc = ctx->sh_desc_digest, *desc;
1334 dma_addr_t ptr = ctx->sh_desc_digest_dma;
1335 int sec4_sg_bytes, sec4_sg_src_index, src_nents;
1336 int digestsize = crypto_ahash_digestsize(ahash);
1337 struct ahash_edesc *edesc;
1338 bool chained = false;
1339 int sh_len;
1340 int ret = 0;
1341
1342 src_nents = __sg_count(req->src, req->nbytes, &chained);
1343 sec4_sg_src_index = 2;
1344 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1345 sizeof(struct sec4_sg_entry);
1346
1347 /* allocate space for base edesc and hw desc commands, link tables */
1348 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1349 sec4_sg_bytes, GFP_DMA | flags);
1350 if (!edesc) {
1351 dev_err(jrdev, "could not allocate extended descriptor\n");
1352 return -ENOMEM;
1353 }
1354
1355 sh_len = desc_len(sh_desc);
1356 desc = edesc->hw_desc;
1357 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1358
1359 edesc->src_nents = src_nents;
1360 edesc->chained = chained;
1361 edesc->sec4_sg_bytes = sec4_sg_bytes;
1362 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1363 DESC_JOB_IO_LEN;
1364
1365 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
1366 state->buf_dma, buflen,
1367 last_buflen);
1368
1369 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
1370 chained);
1371
1372 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1373 sec4_sg_bytes, DMA_TO_DEVICE);
1374 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1375 dev_err(jrdev, "unable to map S/G table\n");
1376 return -ENOMEM;
1377 }
1378
1379 append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
1380 req->nbytes, LDST_SGF);
1381
1382 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1383 digestsize);
1384 if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1385 dev_err(jrdev, "unable to map dst\n");
1386 return -ENOMEM;
1387 }
1388
1389 #ifdef DEBUG
1390 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1391 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1392 #endif
1393
1394 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1395 if (!ret) {
1396 ret = -EINPROGRESS;
1397 } else {
1398 ahash_unmap(jrdev, edesc, req, digestsize);
1399 kfree(edesc);
1400 }
1401
1402 return ret;
1403 }
1404
1405 /* submit first update job descriptor after init */
1406 static int ahash_update_first(struct ahash_request *req)
1407 {
1408 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1409 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1410 struct caam_hash_state *state = ahash_request_ctx(req);
1411 struct device *jrdev = ctx->jrdev;
1412 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1413 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1414 u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
1415 int *next_buflen = state->current_buf ? &state->buflen_1 :
1416 &state->buflen_0;
1417 int to_hash;
1418 u32 *sh_desc = ctx->sh_desc_update_first, *desc;
1419 dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1420 int sec4_sg_bytes, src_nents;
1421 dma_addr_t src_dma;
1422 u32 options;
1423 struct ahash_edesc *edesc;
1424 bool chained = false;
1425 int ret = 0;
1426 int sh_len;
1427
1428 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1429 1);
1430 to_hash = req->nbytes - *next_buflen;
1431
1432 if (to_hash) {
1433 src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
1434 &chained);
1435 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1436 DMA_TO_DEVICE, chained);
1437 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1438
1439 /*
1440 * allocate space for base edesc and hw desc commands,
1441 * link tables
1442 */
1443 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1444 sec4_sg_bytes, GFP_DMA | flags);
1445 if (!edesc) {
1446 dev_err(jrdev,
1447 "could not allocate extended descriptor\n");
1448 return -ENOMEM;
1449 }
1450
1451 edesc->src_nents = src_nents;
1452 edesc->chained = chained;
1453 edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->dst_dma = 0; /* the result goes to caam_ctx, not req->result */
1454 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1455 DESC_JOB_IO_LEN;
1456
1457 if (src_nents) {
1458 sg_to_sec4_sg_last(req->src, src_nents,
1459 edesc->sec4_sg, 0);
1460 edesc->sec4_sg_dma = dma_map_single(jrdev,
1461 edesc->sec4_sg,
1462 sec4_sg_bytes,
1463 DMA_TO_DEVICE);
1464 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1465 dev_err(jrdev, "unable to map S/G table\n");
1466 return -ENOMEM;
1467 }
1468 src_dma = edesc->sec4_sg_dma;
1469 options = LDST_SGF;
1470 } else {
1471 src_dma = sg_dma_address(req->src);
1472 options = 0;
1473 }
1474
1475 if (*next_buflen)
1476 sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
1477
1478 sh_len = desc_len(sh_desc);
1479 desc = edesc->hw_desc;
1480 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1481 HDR_REVERSE);
1482
1483 append_seq_in_ptr(desc, src_dma, to_hash, options);
1484
1485 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1486 if (ret)
1487 return ret;
1488
1489 #ifdef DEBUG
1490 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1491 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1492 desc_bytes(desc), 1);
1493 #endif
1494
1495 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
1496 req);
1497 if (!ret) {
1498 ret = -EINPROGRESS;
1499 state->update = ahash_update_ctx;
1500 state->finup = ahash_finup_ctx;
1501 state->final = ahash_final_ctx;
1502 } else {
1503 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1504 DMA_FROM_DEVICE); /* matches the ctx mapping in map_seq_out_ptr_ctx() */
1505 kfree(edesc);
1506 }
1507 } else if (*next_buflen) {
1508 state->update = ahash_update_no_ctx;
1509 state->finup = ahash_finup_no_ctx;
1510 state->final = ahash_final_no_ctx;
1511 sg_copy(next_buf, req->src, req->nbytes);
1512 }
1513 #ifdef DEBUG
1514 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1515 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1516 *next_buflen, 1);
1517 #endif
1518
1519 return ret;
1520 }
1521
1522 static int ahash_finup_first(struct ahash_request *req)
1523 {
1524 return ahash_digest(req);
1525 }
1526
1527 static int ahash_init(struct ahash_request *req)
1528 {
1529 struct caam_hash_state *state = ahash_request_ctx(req);
1530
1531 state->update = ahash_update_first;
1532 state->finup = ahash_finup_first;
1533 state->final = ahash_final_no_ctx;
1534
1535 state->current_buf = 0;
1536
1537 return 0;
1538 }
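/*
 * ahash_init() seeds a small state machine: update/finup/final start out
 * at the "first"/"no_ctx" variants and are switched to the "_ctx"
 * variants once a first job descriptor has left a running context in
 * state->caam_ctx.
 */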
1539
1540 static int ahash_update(struct ahash_request *req)
1541 {
1542 struct caam_hash_state *state = ahash_request_ctx(req);
1543
1544 return state->update(req);
1545 }
1546
1547 static int ahash_finup(struct ahash_request *req)
1548 {
1549 struct caam_hash_state *state = ahash_request_ctx(req);
1550
1551 return state->finup(req);
1552 }
1553
1554 static int ahash_final(struct ahash_request *req)
1555 {
1556 struct caam_hash_state *state = ahash_request_ctx(req);
1557
1558 return state->final(req);
1559 }
1560
1561 static int ahash_export(struct ahash_request *req, void *out)
1562 {
1563 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1564 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1565 struct caam_hash_state *state = ahash_request_ctx(req);
1566
1567 memcpy(out, ctx, sizeof(struct caam_hash_ctx));
1568 memcpy(out + sizeof(struct caam_hash_ctx), state,
1569 sizeof(struct caam_hash_state));
1570 return 0;
1571 }
1572
1573 static int ahash_import(struct ahash_request *req, const void *in)
1574 {
1575 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1576 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1577 struct caam_hash_state *state = ahash_request_ctx(req);
1578
1579 memcpy(ctx, in, sizeof(struct caam_hash_ctx));
1580 memcpy(state, in + sizeof(struct caam_hash_ctx),
1581 sizeof(struct caam_hash_state));
1582 return 0;
1583 }
1584
1585 struct caam_hash_template {
1586 char name[CRYPTO_MAX_ALG_NAME];
1587 char driver_name[CRYPTO_MAX_ALG_NAME];
1588 char hmac_name[CRYPTO_MAX_ALG_NAME];
1589 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1590 unsigned int blocksize;
1591 struct ahash_alg template_ahash;
1592 u32 alg_type;
1593 u32 alg_op;
1594 };
1595
1596 /* ahash algorithm templates */
1597 static struct caam_hash_template driver_hash[] = {
1598 {
1599 .name = "sha1",
1600 .driver_name = "sha1-caam",
1601 .hmac_name = "hmac(sha1)",
1602 .hmac_driver_name = "hmac-sha1-caam",
1603 .blocksize = SHA1_BLOCK_SIZE,
1604 .template_ahash = {
1605 .init = ahash_init,
1606 .update = ahash_update,
1607 .final = ahash_final,
1608 .finup = ahash_finup,
1609 .digest = ahash_digest,
1610 .export = ahash_export,
1611 .import = ahash_import,
1612 .setkey = ahash_setkey,
1613 .halg = {
1614 .digestsize = SHA1_DIGEST_SIZE,
1615 },
1616 },
1617 .alg_type = OP_ALG_ALGSEL_SHA1,
1618 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1619 }, {
1620 .name = "sha224",
1621 .driver_name = "sha224-caam",
1622 .hmac_name = "hmac(sha224)",
1623 .hmac_driver_name = "hmac-sha224-caam",
1624 .blocksize = SHA224_BLOCK_SIZE,
1625 .template_ahash = {
1626 .init = ahash_init,
1627 .update = ahash_update,
1628 .final = ahash_final,
1629 .finup = ahash_finup,
1630 .digest = ahash_digest,
1631 .export = ahash_export,
1632 .import = ahash_import,
1633 .setkey = ahash_setkey,
1634 .halg = {
1635 .digestsize = SHA224_DIGEST_SIZE,
1636 },
1637 },
1638 .alg_type = OP_ALG_ALGSEL_SHA224,
1639 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1640 }, {
1641 .name = "sha256",
1642 .driver_name = "sha256-caam",
1643 .hmac_name = "hmac(sha256)",
1644 .hmac_driver_name = "hmac-sha256-caam",
1645 .blocksize = SHA256_BLOCK_SIZE,
1646 .template_ahash = {
1647 .init = ahash_init,
1648 .update = ahash_update,
1649 .final = ahash_final,
1650 .finup = ahash_finup,
1651 .digest = ahash_digest,
1652 .export = ahash_export,
1653 .import = ahash_import,
1654 .setkey = ahash_setkey,
1655 .halg = {
1656 .digestsize = SHA256_DIGEST_SIZE,
1657 },
1658 },
1659 .alg_type = OP_ALG_ALGSEL_SHA256,
1660 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1661 }, {
1662 .name = "sha384",
1663 .driver_name = "sha384-caam",
1664 .hmac_name = "hmac(sha384)",
1665 .hmac_driver_name = "hmac-sha384-caam",
1666 .blocksize = SHA384_BLOCK_SIZE,
1667 .template_ahash = {
1668 .init = ahash_init,
1669 .update = ahash_update,
1670 .final = ahash_final,
1671 .finup = ahash_finup,
1672 .digest = ahash_digest,
1673 .export = ahash_export,
1674 .import = ahash_import,
1675 .setkey = ahash_setkey,
1676 .halg = {
1677 .digestsize = SHA384_DIGEST_SIZE,
1678 },
1679 },
1680 .alg_type = OP_ALG_ALGSEL_SHA384,
1681 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1682 }, {
1683 .name = "sha512",
1684 .driver_name = "sha512-caam",
1685 .hmac_name = "hmac(sha512)",
1686 .hmac_driver_name = "hmac-sha512-caam",
1687 .blocksize = SHA512_BLOCK_SIZE,
1688 .template_ahash = {
1689 .init = ahash_init,
1690 .update = ahash_update,
1691 .final = ahash_final,
1692 .finup = ahash_finup,
1693 .digest = ahash_digest,
1694 .export = ahash_export,
1695 .import = ahash_import,
1696 .setkey = ahash_setkey,
1697 .halg = {
1698 .digestsize = SHA512_DIGEST_SIZE,
1699 },
1700 },
1701 .alg_type = OP_ALG_ALGSEL_SHA512,
1702 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1703 }, {
1704 .name = "md5",
1705 .driver_name = "md5-caam",
1706 .hmac_name = "hmac(md5)",
1707 .hmac_driver_name = "hmac-md5-caam",
1708 .blocksize = MD5_BLOCK_WORDS * 4,
1709 .template_ahash = {
1710 .init = ahash_init,
1711 .update = ahash_update,
1712 .final = ahash_final,
1713 .finup = ahash_finup,
1714 .digest = ahash_digest,
1715 .export = ahash_export,
1716 .import = ahash_import,
1717 .setkey = ahash_setkey,
1718 .halg = {
1719 .digestsize = MD5_DIGEST_SIZE,
1720 },
1721 },
1722 .alg_type = OP_ALG_ALGSEL_MD5,
1723 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1724 },
1725 };
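/*
 * A minimal sketch (illustrative only, error handling elided) of how a
 * kernel-side user would reach one of the templates above through the
 * standard crypto API:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	ahash_request_set_callback(req, 0, my_done_cb, NULL);
 *	ahash_request_set_crypt(req, sg, digest_buf, nbytes);
 *	crypto_ahash_digest(req);
 *
 * crypto_ahash_digest() returns -EINPROGRESS once the job is queued and
 * my_done_cb runs on completion; the names above are placeholders.
 */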
1726
1727 struct caam_hash_alg {
1728 struct list_head entry;
1729 int alg_type;
1730 int alg_op;
1731 struct ahash_alg ahash_alg;
1732 };
1733
1734 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1735 {
1736 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1737 struct crypto_alg *base = tfm->__crt_alg;
1738 struct hash_alg_common *halg =
1739 container_of(base, struct hash_alg_common, base);
1740 struct ahash_alg *alg =
1741 container_of(halg, struct ahash_alg, halg);
1742 struct caam_hash_alg *caam_hash =
1743 container_of(alg, struct caam_hash_alg, ahash_alg);
1744 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1745 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1746 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1747 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1748 HASH_MSG_LEN + 32,
1749 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1750 HASH_MSG_LEN + 64,
1751 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
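/*
 * Example: for sha256 the running state kept in caam_ctx is
 * HASH_MSG_LEN + SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes.
 */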
1752 int ret = 0;
1753
1754 /*
1755 * Get a Job ring from Job Ring driver to ensure in-order
1756 * crypto request processing per tfm
1757 */
1758 ctx->jrdev = caam_jr_alloc();
1759 if (IS_ERR(ctx->jrdev)) {
1760 pr_err("Job Ring Device allocation for transform failed\n");
1761 return PTR_ERR(ctx->jrdev);
1762 }
1763 /* copy descriptor header template value */
1764 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1765 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1766
1767 ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1768 OP_ALG_ALGSEL_SHIFT];
1769
1770 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1771 sizeof(struct caam_hash_state));
1772
1773 ret = ahash_set_sh_desc(ahash);
1774
1775 return ret;
1776 }
1777
1778 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1779 {
1780 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1781
1782 if (ctx->sh_desc_update_dma &&
1783 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1784 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1785 desc_bytes(ctx->sh_desc_update),
1786 DMA_TO_DEVICE);
1787 if (ctx->sh_desc_update_first_dma &&
1788 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1789 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1790 desc_bytes(ctx->sh_desc_update_first),
1791 DMA_TO_DEVICE);
1792 if (ctx->sh_desc_fin_dma &&
1793 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1794 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1795 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1796 if (ctx->sh_desc_digest_dma &&
1797 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1798 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1799 desc_bytes(ctx->sh_desc_digest),
1800 DMA_TO_DEVICE);
1801 if (ctx->sh_desc_finup_dma &&
1802 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1803 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1804 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1805
1806 caam_jr_free(ctx->jrdev);
1807 }
1808
1809 static void __exit caam_algapi_hash_exit(void)
1810 {
1811 struct caam_hash_alg *t_alg, *n;
1812
1813 if (!hash_list.next)
1814 return;
1815
1816 list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1817 crypto_unregister_ahash(&t_alg->ahash_alg);
1818 list_del(&t_alg->entry);
1819 kfree(t_alg);
1820 }
1821 }
1822
1823 static struct caam_hash_alg *
1824 caam_hash_alloc(struct caam_hash_template *template,
1825 bool keyed)
1826 {
1827 struct caam_hash_alg *t_alg;
1828 struct ahash_alg *halg;
1829 struct crypto_alg *alg;
1830
1831 t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
1832 if (!t_alg) {
1833 pr_err("failed to allocate t_alg\n");
1834 return ERR_PTR(-ENOMEM);
1835 }
1836
1837 t_alg->ahash_alg = template->template_ahash;
1838 halg = &t_alg->ahash_alg;
1839 alg = &halg->halg.base;
1840
1841 if (keyed) {
1842 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1843 template->hmac_name);
1844 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1845 template->hmac_driver_name);
1846 } else {
1847 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1848 template->name);
1849 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1850 template->driver_name);
1851 }
1852 alg->cra_module = THIS_MODULE;
1853 alg->cra_init = caam_hash_cra_init;
1854 alg->cra_exit = caam_hash_cra_exit;
1855 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1856 alg->cra_priority = CAAM_CRA_PRIORITY;
1857 alg->cra_blocksize = template->blocksize;
1858 alg->cra_alignmask = 0;
1859 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1860 alg->cra_type = &crypto_ahash_type;
1861
1862 t_alg->alg_type = template->alg_type;
1863 t_alg->alg_op = template->alg_op;
1864
1865 return t_alg;
1866 }
1867
1868 static int __init caam_algapi_hash_init(void)
1869 {
1870 struct device_node *dev_node;
1871 struct platform_device *pdev;
1872 struct device *ctrldev;
1873 void *priv;
1874 int i = 0, err = 0;
1875
1876 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1877 if (!dev_node) {
1878 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1879 if (!dev_node)
1880 return -ENODEV;
1881 }
1882
1883 pdev = of_find_device_by_node(dev_node);
1884 if (!pdev) {
1885 of_node_put(dev_node);
1886 return -ENODEV;
1887 }
1888
1889 ctrldev = &pdev->dev;
1890 priv = dev_get_drvdata(ctrldev);
1891 of_node_put(dev_node);
1892
1893 /*
1894 * If priv is NULL, it's probably because the caam driver wasn't
1895 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
1896 */
1897 if (!priv)
1898 return -ENODEV;
1899
1900 INIT_LIST_HEAD(&hash_list);
1901
1902 /* register crypto algorithms the device supports */
1903 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1904 /* TODO: check if h/w supports alg */
1905 struct caam_hash_alg *t_alg;
1906
1907 /* register hmac version */
1908 t_alg = caam_hash_alloc(&driver_hash[i], true);
1909 if (IS_ERR(t_alg)) {
1910 err = PTR_ERR(t_alg);
1911 pr_warn("%s alg allocation failed\n",
1912 driver_hash[i].driver_name);
1913 continue;
1914 }
1915
1916 err = crypto_register_ahash(&t_alg->ahash_alg);
1917 if (err) {
1918 pr_warn("%s alg registration failed\n",
1919 t_alg->ahash_alg.halg.base.cra_driver_name);
1920 kfree(t_alg);
1921 } else
1922 list_add_tail(&t_alg->entry, &hash_list);
1923
1924 /* register unkeyed version */
1925 t_alg = caam_hash_alloc(&driver_hash[i], false);
1926 if (IS_ERR(t_alg)) {
1927 err = PTR_ERR(t_alg);
1928 pr_warn("%s alg allocation failed\n",
1929 driver_hash[i].driver_name);
1930 continue;
1931 }
1932
1933 err = crypto_register_ahash(&t_alg->ahash_alg);
1934 if (err) {
1935 pr_warn("%s alg registration failed\n",
1936 t_alg->ahash_alg.halg.base.cra_driver_name);
1937 kfree(t_alg);
1938 } else
1939 list_add_tail(&t_alg->entry, &hash_list);
1940 }
1941
1942 return err;
1943 }
1944
1945 module_init(caam_algapi_hash_init);
1946 module_exit(caam_algapi_hash_exit);
1947
1948 MODULE_LICENSE("GPL");
1949 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1950 MODULE_AUTHOR("Freescale Semiconductor - NMG");