crypto: caam - add support for aead null encryption
[deliverable/linux.git] / drivers / crypto / caam / caamalg.c
1/*
2 * caam - Freescale FSL CAAM support for crypto API
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Based on talitos crypto API driver.
7 *
8 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9 *
10 * --------------- ---------------
11 * | JobDesc #1 |-------------------->| ShareDesc |
12 * | *(packet 1) | | (PDB) |
13 * --------------- |------------->| (hashKey) |
14 * . | | (cipherKey) |
15 * . | |-------->| (operation) |
16 * --------------- | | ---------------
17 * | JobDesc #2 |------| |
18 * | *(packet 2) | |
19 * --------------- |
20 * . |
21 * . |
22 * --------------- |
23 * | JobDesc #3 |------------
24 * | *(packet 3) |
25 * ---------------
26 *
27 * The SharedDesc never changes for a connection unless rekeyed, but
28 * each packet will likely be in a different place. So all we need
29 * to know to process the packet is where the input is, where the
30 * output goes, and what context we want to process with. Context is
31 * in the SharedDesc, packet references in the JobDesc.
32 *
33 * So, a job desc looks like:
34 *
35 * ---------------------
36 * | Header |
37 * | ShareDesc Pointer |
38 * | SEQ_OUT_PTR |
39 * | (output buffer) |
6ec47334 40 * | (output length) |
41 * | SEQ_IN_PTR |
42 * | (input buffer) |
6ec47334 43 * | (input length) |
44 * ---------------------
45 */
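/*
 * Editorial sketch (not part of the driver): a job descriptor that reuses
 * an already-built shared descriptor is assembled roughly like this,
 * assuming sh_desc/sh_desc_dma and the DMA-mapped input/output buffers and
 * their lengths were prepared beforehand:
 *
 *	len = desc_len(sh_desc);
 *	init_job_desc_shared(desc, sh_desc_dma, len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *
 * This is the pattern followed by init_aead_job(), init_aead_giv_job() and
 * init_ablkcipher_job() below.
 */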
46
47#include "compat.h"
48
49#include "regs.h"
50#include "intern.h"
51#include "desc_constr.h"
52#include "jr.h"
53#include "error.h"
a299c837 54#include "sg_sw_sec4.h"
4c1ec1f9 55#include "key_gen.h"
56
57/*
58 * crypto alg
59 */
60#define CAAM_CRA_PRIORITY 3000
61/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
62#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
63 SHA512_DIGEST_SIZE * 2)
64/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
65#define CAAM_MAX_IV_LENGTH 16
66
4427b1b4 67/* length of descriptors text */
1acebad3 68#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
69#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
70#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
71#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
72
73#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
74#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
75#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
76
77#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
78#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
79 20 * CAAM_CMD_SZ)
80#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
81 15 * CAAM_CMD_SZ)
82
83#define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
84 CAAM_MAX_KEY_SIZE)
85#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
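/*
 * Editorial note: CAAM_CMD_SZ is the size of one 32-bit descriptor command
 * word (4 bytes), so DESC_AEAD_GIVENC_LEN works out to
 * (4 + 15 + 7) * 4 = 104 bytes, DESC_MAX_USED_BYTES to
 * 104 + 160 (CAAM_MAX_KEY_SIZE) = 264 bytes, and DESC_MAX_USED_LEN to
 * 66 words, which is the size of the sh_desc_* buffers in struct caam_ctx
 * below.
 */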
4427b1b4 86
87#ifdef DEBUG
88/* for print_hex_dumps with line references */
89#define debug(format, arg...) printk(format, arg)
90#else
91#define debug(format, arg...)
92#endif
cfc6f11b 93static struct list_head alg_list;
8e8ec596 94
95/* Set DK bit in class 1 operation if shared */
96static inline void append_dec_op1(u32 *desc, u32 type)
97{
98 u32 *jump_cmd, *uncond_jump_cmd;
99
100 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
101 append_operation(desc, type | OP_ALG_AS_INITFINAL |
102 OP_ALG_DECRYPT);
103 uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
104 set_jump_tgt_here(desc, jump_cmd);
105 append_operation(desc, type | OP_ALG_AS_INITFINAL |
106 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
107 set_jump_tgt_here(desc, uncond_jump_cmd);
108}
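/*
 * Editorial note: append_dec_op1() emits both variants of the class 1
 * decrypt OPERATION command and uses a conditional jump on the SHRD flag
 * to select one at run time: a non-shared descriptor (key just loaded)
 * runs the plain decrypt form, while an already-shared descriptor takes
 * the OP_ALG_AAI_DK ("decrypt key") form.
 */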
109
110/*
111 * For aead functions, read the payload from req->src and write the
112 * payload to req->dst
113 */
114static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
115{
ae4a825f 116 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
117 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
118 KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
119}
120
121/*
122 * For aead encrypt and decrypt, read iv for both classes
123 */
124static inline void aead_append_ld_iv(u32 *desc, int ivsize)
125{
126 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
127 LDST_CLASS_1_CCB | ivsize);
128 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
129}
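/*
 * Editorial note: besides loading the IV into the class 1 context for the
 * cipher, the MOVE in aead_append_ld_iv() pushes a copy into the class 2
 * input FIFO, so the IV is also covered by the ICV computed by the
 * authentication engine.
 */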
130
131/*
132 * For ablkcipher encrypt and decrypt, read from req->src and
133 * write to req->dst
134 */
135static inline void ablkcipher_append_src_dst(u32 *desc)
136{
137 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
138 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
139 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
140 KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
141 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
142}
143
144/*
145 * If all data, including src (with assoc and iv) or dst (with iv only) are
146 * contiguous
147 */
148#define GIV_SRC_CONTIG 1
149#define GIV_DST_CONTIG (1 << 1)
150
151/*
152 * per-session context
153 */
154struct caam_ctx {
155 struct device *jrdev;
156 u32 sh_desc_enc[DESC_MAX_USED_LEN];
157 u32 sh_desc_dec[DESC_MAX_USED_LEN];
158 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
159 dma_addr_t sh_desc_enc_dma;
160 dma_addr_t sh_desc_dec_dma;
161 dma_addr_t sh_desc_givenc_dma;
162 u32 class1_alg_type;
163 u32 class2_alg_type;
164 u32 alg_op;
1acebad3 165 u8 key[CAAM_MAX_KEY_SIZE];
885e9e2f 166 dma_addr_t key_dma;
8e8ec596 167 unsigned int enckeylen;
168 unsigned int split_key_len;
169 unsigned int split_key_pad_len;
170 unsigned int authsize;
171};
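/*
 * Editorial note on the key layout: for aead algorithms ctx->key holds the
 * MDHA split key, padded out to split_key_pad_len, immediately followed by
 * the enckeylen-byte class 1 encryption key; key_dma maps this combined
 * buffer, which is why append_key_aead() references both key_dma and
 * key_dma + split_key_pad_len. For ablkcipher only the encryption key is
 * stored.
 */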
172
173static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
174 int keys_fit_inline)
175{
176 if (keys_fit_inline) {
177 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
178 ctx->split_key_len, CLASS_2 |
179 KEY_DEST_MDHA_SPLIT | KEY_ENC);
180 append_key_as_imm(desc, (void *)ctx->key +
181 ctx->split_key_pad_len, ctx->enckeylen,
182 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
183 } else {
184 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
185 KEY_DEST_MDHA_SPLIT | KEY_ENC);
186 append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
187 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
188 }
189}
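/*
 * Editorial note: when the split key and encryption key fit inside the
 * 64-word descriptor buffer they are embedded as immediate data; otherwise
 * the shared descriptor references them at ctx->key_dma and the hardware
 * fetches them separately.
 */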
190
191static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
192 int keys_fit_inline)
193{
194 u32 *key_jump_cmd;
195
61bb86bb 196 init_sh_desc(desc, HDR_SHARE_SERIAL);
197
198 /* Skip if already shared */
199 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
200 JUMP_COND_SHRD);
201
202 append_key_aead(desc, ctx, keys_fit_inline);
203
204 set_jump_tgt_here(desc, key_jump_cmd);
205}
206
207static int aead_null_set_sh_desc(struct crypto_aead *aead)
208{
209 struct aead_tfm *tfm = &aead->base.crt_aead;
210 struct caam_ctx *ctx = crypto_aead_ctx(aead);
211 struct device *jrdev = ctx->jrdev;
212 bool keys_fit_inline = false;
213 u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
214 u32 *desc;
215
216 /*
217 * Job Descriptor and Shared Descriptors
218 * must all fit into the 64-word Descriptor h/w Buffer
219 */
220 if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
221 ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
222 keys_fit_inline = true;
223
224 /* aead_encrypt shared descriptor */
225 desc = ctx->sh_desc_enc;
226
227 init_sh_desc(desc, HDR_SHARE_SERIAL);
228
229 /* Skip if already shared */
230 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
231 JUMP_COND_SHRD);
232 if (keys_fit_inline)
233 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
234 ctx->split_key_len, CLASS_2 |
235 KEY_DEST_MDHA_SPLIT | KEY_ENC);
236 else
237 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
238 KEY_DEST_MDHA_SPLIT | KEY_ENC);
239 set_jump_tgt_here(desc, key_jump_cmd);
240
241 /* cryptlen = seqoutlen - authsize */
242 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
243
244 /*
245 * NULL encryption; IV is zero
246 * assoclen = (assoclen + cryptlen) - cryptlen
247 */
248 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
249
250 /* read assoc before reading payload */
251 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
252 KEY_VLF);
253
254 /* Prepare to read and write cryptlen bytes */
255 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
256 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
257
258 /*
259 * MOVE_LEN opcode is not available in all SEC HW revisions,
260 * thus need to do some magic, i.e. self-patch the descriptor
261 * buffer.
262 */
263 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
264 MOVE_DEST_MATH3 |
265 (0x6 << MOVE_LEN_SHIFT));
266 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
267 MOVE_DEST_DESCBUF |
268 MOVE_WAITCOMP |
269 (0x8 << MOVE_LEN_SHIFT));
270
271 /* Class 2 operation */
272 append_operation(desc, ctx->class2_alg_type |
273 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
274
275 /* Read and write cryptlen bytes */
276 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
277
278 set_move_tgt_here(desc, read_move_cmd);
279 set_move_tgt_here(desc, write_move_cmd);
280 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
281 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
282 MOVE_AUX_LS);
283
284 /* Write ICV */
285 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
286 LDST_SRCDST_BYTE_CONTEXT);
287
288 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
289 desc_bytes(desc),
290 DMA_TO_DEVICE);
291 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
292 dev_err(jrdev, "unable to map shared descriptor\n");
293 return -ENOMEM;
294 }
295#ifdef DEBUG
296 print_hex_dump(KERN_ERR,
297 "aead null enc shdesc@"__stringify(__LINE__)": ",
298 DUMP_PREFIX_ADDRESS, 16, 4, desc,
299 desc_bytes(desc), 1);
300#endif
301
302 /*
303 * Job Descriptor and Shared Descriptors
304 * must all fit into the 64-word Descriptor h/w Buffer
305 */
306 if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
307 ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
308 keys_fit_inline = true;
309
310 desc = ctx->sh_desc_dec;
311
312 /* aead_decrypt shared descriptor */
313 init_sh_desc(desc, HDR_SHARE_SERIAL);
314
315 /* Skip if already shared */
316 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
317 JUMP_COND_SHRD);
318 if (keys_fit_inline)
319 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
320 ctx->split_key_len, CLASS_2 |
321 KEY_DEST_MDHA_SPLIT | KEY_ENC);
322 else
323 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
324 KEY_DEST_MDHA_SPLIT | KEY_ENC);
325 set_jump_tgt_here(desc, key_jump_cmd);
326
327 /* Class 2 operation */
328 append_operation(desc, ctx->class2_alg_type |
329 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
330
331 /* assoclen + cryptlen = seqinlen - ivsize - authsize */
332 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
333 ctx->authsize + tfm->ivsize);
334 /* assoclen = (assoclen + cryptlen) - cryptlen */
335 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
336 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
337
338 /* read assoc before reading payload */
339 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
340 KEY_VLF);
341
342 /* Prepare to read and write cryptlen bytes */
343 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
344 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
345
346 /*
347 * MOVE_LEN opcode is not available in all SEC HW revisions,
348 * thus need to do some magic, i.e. self-patch the descriptor
349 * buffer.
350 */
351 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
352 MOVE_DEST_MATH2 |
353 (0x6 << MOVE_LEN_SHIFT));
354 write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
355 MOVE_DEST_DESCBUF |
356 MOVE_WAITCOMP |
357 (0x8 << MOVE_LEN_SHIFT));
358
359 /* Read and write cryptlen bytes */
360 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
361
362 /*
363 * Insert a NOP here, since we need at least 4 instructions between
364 * code patching the descriptor buffer and the location being patched.
365 */
366 jump_cmd = append_jump(desc, JUMP_TEST_ALL);
367 set_jump_tgt_here(desc, jump_cmd);
368
369 set_move_tgt_here(desc, read_move_cmd);
370 set_move_tgt_here(desc, write_move_cmd);
371 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
372 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
373 MOVE_AUX_LS);
374 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
375
376 /* Load ICV */
377 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
378 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
379
380 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
381 desc_bytes(desc),
382 DMA_TO_DEVICE);
383 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
384 dev_err(jrdev, "unable to map shared descriptor\n");
385 return -ENOMEM;
386 }
387#ifdef DEBUG
388 print_hex_dump(KERN_ERR,
389 "aead null dec shdesc@"__stringify(__LINE__)": ",
390 DUMP_PREFIX_ADDRESS, 16, 4, desc,
391 desc_bytes(desc), 1);
392#endif
393
394 return 0;
395}
396
397static int aead_set_sh_desc(struct crypto_aead *aead)
398{
399 struct aead_tfm *tfm = &aead->base.crt_aead;
400 struct caam_ctx *ctx = crypto_aead_ctx(aead);
401 struct device *jrdev = ctx->jrdev;
2af8f4a2 402 bool keys_fit_inline = false;
403 u32 geniv, moveiv;
404 u32 *desc;
405
ae4a825f 406 if (!ctx->authsize)
407 return 0;
408
409 /* NULL encryption / decryption */
410 if (!ctx->enckeylen)
411 return aead_null_set_sh_desc(aead);
412
413 /*
414 * Job Descriptor and Shared Descriptors
415 * must all fit into the 64-word Descriptor h/w Buffer
416 */
417 if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
418 ctx->split_key_pad_len + ctx->enckeylen <=
419 CAAM_DESC_BYTES_MAX)
2af8f4a2 420 keys_fit_inline = true;
421
422 /* aead_encrypt shared descriptor */
423 desc = ctx->sh_desc_enc;
424
425 init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
426
427 /* Class 2 operation */
428 append_operation(desc, ctx->class2_alg_type |
429 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
430
431 /* cryptlen = seqoutlen - authsize */
432 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
433
434 /* assoclen + cryptlen = seqinlen - ivsize */
435 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
436
4464a7d4 437 /* assoclen = (assoclen + cryptlen) - cryptlen */
438 append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
439
440 /* read assoc before reading payload */
441 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
442 KEY_VLF);
443 aead_append_ld_iv(desc, tfm->ivsize);
444
445 /* Class 1 operation */
446 append_operation(desc, ctx->class1_alg_type |
447 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
448
449 /* Read and write cryptlen bytes */
450 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
451 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
452 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
453
454 /* Write ICV */
455 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
456 LDST_SRCDST_BYTE_CONTEXT);
457
458 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
459 desc_bytes(desc),
460 DMA_TO_DEVICE);
461 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
462 dev_err(jrdev, "unable to map shared descriptor\n");
463 return -ENOMEM;
464 }
465#ifdef DEBUG
514df281 466 print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
467 DUMP_PREFIX_ADDRESS, 16, 4, desc,
468 desc_bytes(desc), 1);
469#endif
470
471 /*
472 * Job Descriptor and Shared Descriptors
473 * must all fit into the 64-word Descriptor h/w Buffer
474 */
475 if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
476 ctx->split_key_pad_len + ctx->enckeylen <=
477 CAAM_DESC_BYTES_MAX)
2af8f4a2 478 keys_fit_inline = true;
1acebad3 479
1acebad3 480 /* aead_decrypt shared descriptor */
4464a7d4 481 desc = ctx->sh_desc_dec;
1acebad3 482
4464a7d4 483 init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
484
485 /* Class 2 operation */
486 append_operation(desc, ctx->class2_alg_type |
487 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
488
4464a7d4 489 /* assoclen + cryptlen = seqinlen - ivsize - authsize */
1acebad3 490 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
ae4a825f 491 ctx->authsize + tfm->ivsize);
492 /* assoclen = (assoclen + cryptlen) - cryptlen */
493 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
494 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
495
496 /* read assoc before reading payload */
497 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
498 KEY_VLF);
499
500 aead_append_ld_iv(desc, tfm->ivsize);
501
502 append_dec_op1(desc, ctx->class1_alg_type);
503
504 /* Read and write cryptlen bytes */
505 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
506 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
507 aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
508
509 /* Load ICV */
510 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
511 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
512
513 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
514 desc_bytes(desc),
515 DMA_TO_DEVICE);
516 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
517 dev_err(jrdev, "unable to map shared descriptor\n");
518 return -ENOMEM;
519 }
520#ifdef DEBUG
514df281 521 print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
522 DUMP_PREFIX_ADDRESS, 16, 4, desc,
523 desc_bytes(desc), 1);
524#endif
525
526 /*
527 * Job Descriptor and Shared Descriptors
528 * must all fit into the 64-word Descriptor h/w Buffer
529 */
530 if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
531 ctx->split_key_pad_len + ctx->enckeylen <=
532 CAAM_DESC_BYTES_MAX)
2af8f4a2 533 keys_fit_inline = true;
534
535 /* aead_givencrypt shared descriptor */
536 desc = ctx->sh_desc_givenc;
537
538 init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
539
540 /* Generate IV */
541 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
542 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
543 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
544 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
545 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
546 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
547 append_move(desc, MOVE_SRC_INFIFO |
548 MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
549 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
550
551 /* Copy IV from class 1 context to the output FIFO */
552 append_move(desc, MOVE_SRC_CLASS1CTX |
553 MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
554
555 /* Return to encryption */
556 append_operation(desc, ctx->class2_alg_type |
557 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
558
559 /* ivsize + cryptlen = seqoutlen - authsize */
560 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
561
562 /* assoclen = seqinlen - (ivsize + cryptlen) */
563 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
564
565 /* read assoc before reading payload */
566 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
567 KEY_VLF);
568
569 /* Copy iv from class 1 ctx to class 2 fifo */
570 moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
571 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
572 append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
573 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
574 append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
575 LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
576
577 /* Class 1 operation */
578 append_operation(desc, ctx->class1_alg_type |
579 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
580
581 /* Will write ivsize + cryptlen */
582 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
583
584 /* No need to reload iv */
585 append_seq_fifo_load(desc, tfm->ivsize,
586 FIFOLD_CLASS_SKIP);
587
588 /* Will read cryptlen */
589 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
590 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
591
592 /* Write ICV */
593 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
594 LDST_SRCDST_BYTE_CONTEXT);
595
596 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
597 desc_bytes(desc),
598 DMA_TO_DEVICE);
599 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
600 dev_err(jrdev, "unable to map shared descriptor\n");
601 return -ENOMEM;
602 }
603#ifdef DEBUG
514df281 604 print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
605 DUMP_PREFIX_ADDRESS, 16, 4, desc,
606 desc_bytes(desc), 1);
607#endif
608
609 return 0;
610}
611
0e479300 612static int aead_setauthsize(struct crypto_aead *authenc,
613 unsigned int authsize)
614{
615 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
616
617 ctx->authsize = authsize;
1acebad3 618 aead_set_sh_desc(authenc);
619
620 return 0;
621}
622
623static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
624 u32 authkeylen)
8e8ec596 625{
626 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
627 ctx->split_key_pad_len, key_in, authkeylen,
628 ctx->alg_op);
629}
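/*
 * Editorial note: the "split key" generated here is the MDHA ipad/opad pair
 * the hardware derives from the raw authentication key (see gen_split_key()
 * in key_gen.c); it is stored at the start of ctx->key and later loaded
 * into the class 2 key register with the KEY_ENC flag set.
 */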
630
0e479300 631static int aead_setkey(struct crypto_aead *aead,
632 const u8 *key, unsigned int keylen)
633{
634 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
635 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
636 struct caam_ctx *ctx = crypto_aead_ctx(aead);
637 struct device *jrdev = ctx->jrdev;
4e6e0b27 638 struct crypto_authenc_keys keys;
639 int ret = 0;
640
4e6e0b27 641 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
642 goto badkey;
643
644 /* Pick class 2 key length from algorithm submask */
645 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
646 OP_ALG_ALGSEL_SHIFT] * 2;
647 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
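	/*
	 * Editorial example: for hmac(sha256) the algorithm submask selects
	 * the 32-byte entry of mdpadlen[], so split_key_len is 64 bytes and
	 * split_key_pad_len stays 64 (already a multiple of 16).
	 */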
648
649 if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
650 goto badkey;
651
652#ifdef DEBUG
653 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
654 keys.authkeylen + keys.enckeylen, keys.enckeylen,
655 keys.authkeylen);
656 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
657 ctx->split_key_len, ctx->split_key_pad_len);
514df281 658 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
659 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
660#endif
8e8ec596 661
4e6e0b27 662 ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
8e8ec596 663 if (ret) {
664 goto badkey;
665 }
666
667 /* append encryption key after the auth split key */
4e6e0b27 668 memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
8e8ec596 669
885e9e2f 670 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
4e6e0b27 671 keys.enckeylen, DMA_TO_DEVICE);
885e9e2f 672 if (dma_mapping_error(jrdev, ctx->key_dma)) {
8e8ec596 673 dev_err(jrdev, "unable to map key i/o memory\n");
674 return -ENOMEM;
675 }
676#ifdef DEBUG
514df281 677 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
8e8ec596 678 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
4e6e0b27 679 ctx->split_key_pad_len + keys.enckeylen, 1);
680#endif
681
4e6e0b27 682 ctx->enckeylen = keys.enckeylen;
8e8ec596 683
1acebad3 684 ret = aead_set_sh_desc(aead);
8e8ec596 685 if (ret) {
885e9e2f 686 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
4e6e0b27 687 keys.enckeylen, DMA_TO_DEVICE);
688 }
689
690 return ret;
691badkey:
692 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
693 return -EINVAL;
694}
695
696static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
697 const u8 *key, unsigned int keylen)
698{
699 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
700 struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
701 struct device *jrdev = ctx->jrdev;
702 int ret = 0;
4464a7d4 703 u32 *key_jump_cmd;
704 u32 *desc;
705
706#ifdef DEBUG
514df281 707 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
708 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
709#endif
710
711 memcpy(ctx->key, key, keylen);
712 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
713 DMA_TO_DEVICE);
714 if (dma_mapping_error(jrdev, ctx->key_dma)) {
715 dev_err(jrdev, "unable to map key i/o memory\n");
716 return -ENOMEM;
717 }
718 ctx->enckeylen = keylen;
719
720 /* ablkcipher_encrypt shared descriptor */
721 desc = ctx->sh_desc_enc;
61bb86bb 722 init_sh_desc(desc, HDR_SHARE_SERIAL);
723 /* Skip if already shared */
724 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
725 JUMP_COND_SHRD);
726
727 /* Load class1 key only */
728 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
729 ctx->enckeylen, CLASS_1 |
730 KEY_DEST_CLASS_REG);
731
732 set_jump_tgt_here(desc, key_jump_cmd);
733
734 /* Load iv */
735 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
736 LDST_CLASS_1_CCB | tfm->ivsize);
737
738 /* Load operation */
739 append_operation(desc, ctx->class1_alg_type |
740 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
741
742 /* Perform operation */
743 ablkcipher_append_src_dst(desc);
744
745 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
746 desc_bytes(desc),
747 DMA_TO_DEVICE);
748 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
749 dev_err(jrdev, "unable to map shared descriptor\n");
750 return -ENOMEM;
751 }
752#ifdef DEBUG
753 print_hex_dump(KERN_ERR,
754 "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
755 DUMP_PREFIX_ADDRESS, 16, 4, desc,
756 desc_bytes(desc), 1);
757#endif
758 /* ablkcipher_decrypt shared descriptor */
759 desc = ctx->sh_desc_dec;
760
61bb86bb 761 init_sh_desc(desc, HDR_SHARE_SERIAL);
762 /* Skip if already shared */
763 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
764 JUMP_COND_SHRD);
765
766 /* Load class1 key only */
767 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
768 ctx->enckeylen, CLASS_1 |
769 KEY_DEST_CLASS_REG);
770
acdca31d 771 set_jump_tgt_here(desc, key_jump_cmd);
772
773 /* load IV */
774 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
775 LDST_CLASS_1_CCB | tfm->ivsize);
776
777 /* Choose operation */
778 append_dec_op1(desc, ctx->class1_alg_type);
779
780 /* Perform operation */
781 ablkcipher_append_src_dst(desc);
782
783 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
784 desc_bytes(desc),
785 DMA_TO_DEVICE);
786 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
787 dev_err(jrdev, "unable to map shared descriptor\n");
788 return -ENOMEM;
789 }
790
791#ifdef DEBUG
792 print_hex_dump(KERN_ERR,
793 "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
794 DUMP_PREFIX_ADDRESS, 16, 4, desc,
795 desc_bytes(desc), 1);
796#endif
797
798 return ret;
799}
800
8e8ec596 801/*
802 * aead_edesc - s/w-extended aead descriptor
803 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
643b39b0 804 * @assoc_chained: if associated data is chained
8e8ec596 805 * @src_nents: number of segments in input scatterlist
643b39b0 806 * @src_chained: if source is chained
8e8ec596 807 * @dst_nents: number of segments in output scatterlist
643b39b0 808 * @dst_chained: if destination is chained
1acebad3 809 * @iv_dma: dma address of iv for checking continuity and link table
8e8ec596 810 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
811 * @sec4_sg_bytes: length of dma mapped sec4_sg space
812 * @sec4_sg_dma: bus physical mapped address of h/w link table
813 * @hw_desc: the h/w job descriptor followed by any referenced link tables
814 */
0e479300 815struct aead_edesc {
8e8ec596 816 int assoc_nents;
643b39b0 817 bool assoc_chained;
8e8ec596 818 int src_nents;
643b39b0 819 bool src_chained;
8e8ec596 820 int dst_nents;
643b39b0 821 bool dst_chained;
1acebad3 822 dma_addr_t iv_dma;
823 int sec4_sg_bytes;
824 dma_addr_t sec4_sg_dma;
825 struct sec4_sg_entry *sec4_sg;
826 u32 hw_desc[0];
827};
828
829/*
830 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
831 * @src_nents: number of segments in input scatterlist
643b39b0 832 * @src_chained: if source is chained
acdca31d 833 * @dst_nents: number of segments in output scatterlist
643b39b0 834 * @dst_chained: if destination is chained
835 * @iv_dma: dma address of iv for checking continuity and link table
836 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
837 * @sec4_sg_bytes: length of dma mapped sec4_sg space
838 * @sec4_sg_dma: bus physical mapped address of h/w link table
839 * @hw_desc: the h/w job descriptor followed by any referenced link tables
840 */
841struct ablkcipher_edesc {
842 int src_nents;
643b39b0 843 bool src_chained;
acdca31d 844 int dst_nents;
643b39b0 845 bool dst_chained;
acdca31d 846 dma_addr_t iv_dma;
847 int sec4_sg_bytes;
848 dma_addr_t sec4_sg_dma;
849 struct sec4_sg_entry *sec4_sg;
850 u32 hw_desc[0];
851};
852
1acebad3 853static void caam_unmap(struct device *dev, struct scatterlist *src,
854 struct scatterlist *dst, int src_nents,
855 bool src_chained, int dst_nents, bool dst_chained,
856 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
857 int sec4_sg_bytes)
8e8ec596 858{
859 if (dst != src) {
860 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
861 src_chained);
862 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
863 dst_chained);
8e8ec596 864 } else {
865 dma_unmap_sg_chained(dev, src, src_nents ? : 1,
866 DMA_BIDIRECTIONAL, src_chained);
867 }
868
869 if (iv_dma)
870 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
871 if (sec4_sg_bytes)
872 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
873 DMA_TO_DEVICE);
874}
875
876static void aead_unmap(struct device *dev,
877 struct aead_edesc *edesc,
878 struct aead_request *req)
879{
880 struct crypto_aead *aead = crypto_aead_reqtfm(req);
881 int ivsize = crypto_aead_ivsize(aead);
882
883 dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
884 DMA_TO_DEVICE, edesc->assoc_chained);
885
886 caam_unmap(dev, req->src, req->dst,
887 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
888 edesc->dst_chained, edesc->iv_dma, ivsize,
889 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
890}
891
892static void ablkcipher_unmap(struct device *dev,
893 struct ablkcipher_edesc *edesc,
894 struct ablkcipher_request *req)
895{
896 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
897 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
898
899 caam_unmap(dev, req->src, req->dst,
900 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
901 edesc->dst_chained, edesc->iv_dma, ivsize,
902 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
903}
904
0e479300 905static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
906 void *context)
907{
908 struct aead_request *req = context;
909 struct aead_edesc *edesc;
8e8ec596 910#ifdef DEBUG
0e479300 911 struct crypto_aead *aead = crypto_aead_reqtfm(req);
8e8ec596 912 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1acebad3 913 int ivsize = crypto_aead_ivsize(aead);
914
915 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
916#endif
1acebad3 917
918 edesc = (struct aead_edesc *)((char *)desc -
919 offsetof(struct aead_edesc, hw_desc));
920
921 if (err) {
de2954d6 922 char tmp[CAAM_ERROR_STR_MAX];
8e8ec596 923
924 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
925 }
926
0e479300 927 aead_unmap(jrdev, edesc, req);
928
929#ifdef DEBUG
514df281 930 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
931 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
932 req->assoclen , 1);
514df281 933 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
0e479300 934 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
8e8ec596 935 edesc->src_nents ? 100 : ivsize, 1);
514df281 936 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
937 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
938 edesc->src_nents ? 100 : req->cryptlen +
939 ctx->authsize + 4, 1);
940#endif
941
942 kfree(edesc);
943
0e479300 944 aead_request_complete(req, err);
945}
946
0e479300 947static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
948 void *context)
949{
950 struct aead_request *req = context;
951 struct aead_edesc *edesc;
8e8ec596 952#ifdef DEBUG
0e479300 953 struct crypto_aead *aead = crypto_aead_reqtfm(req);
8e8ec596 954 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1acebad3 955 int ivsize = crypto_aead_ivsize(aead);
956
957 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
958#endif
1acebad3 959
960 edesc = (struct aead_edesc *)((char *)desc -
961 offsetof(struct aead_edesc, hw_desc));
8e8ec596 962
1acebad3 963#ifdef DEBUG
514df281 964 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
965 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
966 ivsize, 1);
514df281 967 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
1acebad3 968 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
bbf9c893 969 req->cryptlen - ctx->authsize, 1);
970#endif
971
8e8ec596 972 if (err) {
de2954d6 973 char tmp[CAAM_ERROR_STR_MAX];
974
975 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
976 }
977
0e479300 978 aead_unmap(jrdev, edesc, req);
979
980 /*
981 * verify hw auth check passed else return -EBADMSG
982 */
983 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
984 err = -EBADMSG;
985
986#ifdef DEBUG
514df281 987 print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
8e8ec596 988 DUMP_PREFIX_ADDRESS, 16, 4,
989 ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
990 sizeof(struct iphdr) + req->assoclen +
991 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
8e8ec596 992 ctx->authsize + 36, 1);
a299c837 993 if (!err && edesc->sec4_sg_bytes) {
0e479300 994 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
514df281 995 print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
996 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
997 sg->length + ctx->authsize + 16, 1);
998 }
999#endif
1acebad3 1000
1001 kfree(edesc);
1002
0e479300 1003 aead_request_complete(req, err);
1004}
1005
1006static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1007 void *context)
1008{
1009 struct ablkcipher_request *req = context;
1010 struct ablkcipher_edesc *edesc;
1011#ifdef DEBUG
1012 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1013 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1014
1015 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1016#endif
1017
1018 edesc = (struct ablkcipher_edesc *)((char *)desc -
1019 offsetof(struct ablkcipher_edesc, hw_desc));
1020
1021 if (err) {
1022 char tmp[CAAM_ERROR_STR_MAX];
1023
1024 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
1025 }
1026
1027#ifdef DEBUG
514df281 1028 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
1029 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1030 edesc->src_nents > 1 ? 100 : ivsize, 1);
514df281 1031 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
1032 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1033 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1034#endif
1035
1036 ablkcipher_unmap(jrdev, edesc, req);
1037 kfree(edesc);
1038
1039 ablkcipher_request_complete(req, err);
1040}
1041
1042static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1043 void *context)
1044{
1045 struct ablkcipher_request *req = context;
1046 struct ablkcipher_edesc *edesc;
1047#ifdef DEBUG
1048 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1049 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1050
1051 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1052#endif
1053
1054 edesc = (struct ablkcipher_edesc *)((char *)desc -
1055 offsetof(struct ablkcipher_edesc, hw_desc));
1056 if (err) {
1057 char tmp[CAAM_ERROR_STR_MAX];
1058
1059 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
1060 }
1061
1062#ifdef DEBUG
514df281 1063 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
1064 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1065 ivsize, 1);
514df281 1066 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
1067 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1068 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1069#endif
1070
1071 ablkcipher_unmap(jrdev, edesc, req);
1072 kfree(edesc);
1073
1074 ablkcipher_request_complete(req, err);
1075}
1076
8e8ec596 1077/*
1acebad3 1078 * Fill in aead job descriptor
8e8ec596 1079 */
1080static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1081 struct aead_edesc *edesc,
1082 struct aead_request *req,
1083 bool all_contig, bool encrypt)
8e8ec596 1084{
0e479300 1085 struct crypto_aead *aead = crypto_aead_reqtfm(req);
8e8ec596 1086 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1087 int ivsize = crypto_aead_ivsize(aead);
1088 int authsize = ctx->authsize;
1089 u32 *desc = edesc->hw_desc;
1090 u32 out_options = 0, in_options;
1091 dma_addr_t dst_dma, src_dma;
a299c837 1092 int len, sec4_sg_index = 0;
8e8ec596 1093
1acebad3 1094#ifdef DEBUG
8e8ec596 1095 debug("assoclen %d cryptlen %d authsize %d\n",
0e479300 1096 req->assoclen, req->cryptlen, authsize);
514df281 1097 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
1098 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1099 req->assoclen , 1);
514df281 1100 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1acebad3 1101 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
8e8ec596 1102 edesc->src_nents ? 100 : ivsize, 1);
514df281 1103 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
0e479300 1104 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1acebad3 1105 edesc->src_nents ? 100 : req->cryptlen, 1);
514df281 1106 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
1107 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1108 desc_bytes(sh_desc), 1);
1109#endif
8e8ec596 1110
1111 len = desc_len(sh_desc);
1112 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
8e8ec596 1113
1114 if (all_contig) {
1115 src_dma = sg_dma_address(req->assoc);
1116 in_options = 0;
8e8ec596 1117 } else {
1118 src_dma = edesc->sec4_sg_dma;
1119 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
1120 (edesc->src_nents ? : 1);
1acebad3 1121 in_options = LDST_SGF;
8e8ec596 1122 }
1123
1124 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
1125 in_options);
8e8ec596 1126
1127 if (likely(req->src == req->dst)) {
1128 if (all_contig) {
1129 dst_dma = sg_dma_address(req->src);
1130 } else {
a299c837 1131 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1132 ((edesc->assoc_nents ? : 1) + 1);
1133 out_options = LDST_SGF;
1134 }
8e8ec596 1135 } else {
8e8ec596 1136 if (!edesc->dst_nents) {
0e479300 1137 dst_dma = sg_dma_address(req->dst);
8e8ec596 1138 } else {
1139 dst_dma = edesc->sec4_sg_dma +
1140 sec4_sg_index *
1141 sizeof(struct sec4_sg_entry);
1acebad3 1142 out_options = LDST_SGF;
1143 }
1144 }
8e8ec596 1145 if (encrypt)
1146 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
1147 out_options);
8e8ec596 1148 else
1149 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
1150 out_options);
1151}
1152
1153/*
1154 * Fill in aead givencrypt job descriptor
1155 */
1156static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1157 struct aead_edesc *edesc,
1158 struct aead_request *req,
1159 int contig)
1160{
1161 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1162 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1163 int ivsize = crypto_aead_ivsize(aead);
1164 int authsize = ctx->authsize;
1165 u32 *desc = edesc->hw_desc;
1166 u32 out_options = 0, in_options;
1167 dma_addr_t dst_dma, src_dma;
a299c837 1168 int len, sec4_sg_index = 0;
1169
1170#ifdef DEBUG
1171 debug("assoclen %d cryptlen %d authsize %d\n",
1172 req->assoclen, req->cryptlen, authsize);
514df281 1173 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
1174 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1175 req->assoclen , 1);
514df281 1176 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1acebad3 1177 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
514df281 1178 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
1179 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1180 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
514df281 1181 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
1182 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1183 desc_bytes(sh_desc), 1);
1184#endif
1185
1186 len = desc_len(sh_desc);
1187 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1188
1189 if (contig & GIV_SRC_CONTIG) {
1190 src_dma = sg_dma_address(req->assoc);
1191 in_options = 0;
1192 } else {
1193 src_dma = edesc->sec4_sg_dma;
1194 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
1acebad3 1195 in_options = LDST_SGF;
8e8ec596 1196 }
1197 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
1198 in_options);
8e8ec596 1199
1200 if (contig & GIV_DST_CONTIG) {
1201 dst_dma = edesc->iv_dma;
1202 } else {
1203 if (likely(req->src == req->dst)) {
a299c837 1204 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1205 edesc->assoc_nents;
1206 out_options = LDST_SGF;
1207 } else {
1208 dst_dma = edesc->sec4_sg_dma +
1209 sec4_sg_index *
1210 sizeof(struct sec4_sg_entry);
1211 out_options = LDST_SGF;
1212 }
1213 }
1214
1215 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
1216 out_options);
1217}
1218
1219/*
1220 * Fill in ablkcipher job descriptor
1221 */
1222static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1223 struct ablkcipher_edesc *edesc,
1224 struct ablkcipher_request *req,
1225 bool iv_contig)
1226{
1227 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1228 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1229 u32 *desc = edesc->hw_desc;
1230 u32 out_options = 0, in_options;
1231 dma_addr_t dst_dma, src_dma;
a299c837 1232 int len, sec4_sg_index = 0;
1233
1234#ifdef DEBUG
514df281 1235 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1236 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1237 ivsize, 1);
514df281 1238 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
1239 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1240 edesc->src_nents ? 100 : req->nbytes, 1);
1241#endif
1242
1243 len = desc_len(sh_desc);
1244 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1245
1246 if (iv_contig) {
1247 src_dma = edesc->iv_dma;
1248 in_options = 0;
1249 } else {
1250 src_dma = edesc->sec4_sg_dma;
1251 sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
1252 in_options = LDST_SGF;
1253 }
1254 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
1255
1256 if (likely(req->src == req->dst)) {
1257 if (!edesc->src_nents && iv_contig) {
1258 dst_dma = sg_dma_address(req->src);
1259 } else {
1260 dst_dma = edesc->sec4_sg_dma +
1261 sizeof(struct sec4_sg_entry);
1262 out_options = LDST_SGF;
1263 }
1264 } else {
1265 if (!edesc->dst_nents) {
1266 dst_dma = sg_dma_address(req->dst);
1267 } else {
1268 dst_dma = edesc->sec4_sg_dma +
1269 sec4_sg_index * sizeof(struct sec4_sg_entry);
1270 out_options = LDST_SGF;
1271 }
1272 }
1273 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1274}
1275
8e8ec596 1276/*
1acebad3 1277 * allocate and map the aead extended descriptor
8e8ec596 1278 */
0e479300 1279static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1280 int desc_bytes, bool *all_contig_ptr,
1281 bool encrypt)
8e8ec596 1282{
0e479300 1283 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1284 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1285 struct device *jrdev = ctx->jrdev;
1286 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1287 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1288 int assoc_nents, src_nents, dst_nents = 0;
0e479300 1289 struct aead_edesc *edesc;
1290 dma_addr_t iv_dma = 0;
1291 int sgc;
1292 bool all_contig = true;
643b39b0 1293 bool assoc_chained = false, src_chained = false, dst_chained = false;
1acebad3 1294 int ivsize = crypto_aead_ivsize(aead);
a299c837 1295 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
bbf9c893 1296 unsigned int authsize = ctx->authsize;
8e8ec596 1297
643b39b0 1298 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1acebad3 1299
1300 if (unlikely(req->dst != req->src)) {
1301 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1302 dst_nents = sg_count(req->dst,
1303 req->cryptlen +
1304 (encrypt ? authsize : (-authsize)),
1305 &dst_chained);
1306 } else {
1307 src_nents = sg_count(req->src,
1308 req->cryptlen +
1309 (encrypt ? authsize : 0),
1310 &src_chained);
1311 }
1acebad3 1312
643b39b0 1313 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
286233e6 1314 DMA_TO_DEVICE, assoc_chained);
1acebad3 1315 if (likely(req->src == req->dst)) {
1316 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1317 DMA_BIDIRECTIONAL, src_chained);
1acebad3 1318 } else {
1319 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1320 DMA_TO_DEVICE, src_chained);
1321 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1322 DMA_FROM_DEVICE, dst_chained);
1323 }
1324
1325 /* Check if data are contiguous */
1326 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
1327 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1328 iv_dma || src_nents || iv_dma + ivsize !=
1329 sg_dma_address(req->src)) {
1330 all_contig = false;
1331 assoc_nents = assoc_nents ? : 1;
1332 src_nents = src_nents ? : 1;
a299c837 1333 sec4_sg_len = assoc_nents + 1 + src_nents;
8e8ec596 1334 }
a299c837 1335 sec4_sg_len += dst_nents;
8e8ec596 1336
a299c837 1337 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
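	/*
	 * Editorial note: when a link table is needed it is laid out as
	 * [assoc entries][iv entry][src entries], followed by [dst entries]
	 * when dst differs from src; sec4_sg_index below tracks where each
	 * group starts so init_aead_job() can point SEQ IN/OUT at them.
	 */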
1338
1339 /* allocate space for base edesc and hw desc commands, link tables */
0e479300 1340 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
a299c837 1341 sec4_sg_bytes, GFP_DMA | flags);
1342 if (!edesc) {
1343 dev_err(jrdev, "could not allocate extended descriptor\n");
1344 return ERR_PTR(-ENOMEM);
1345 }
1346
1347 edesc->assoc_nents = assoc_nents;
643b39b0 1348 edesc->assoc_chained = assoc_chained;
8e8ec596 1349 edesc->src_nents = src_nents;
643b39b0 1350 edesc->src_chained = src_chained;
8e8ec596 1351 edesc->dst_nents = dst_nents;
643b39b0 1352 edesc->dst_chained = dst_chained;
1acebad3 1353 edesc->iv_dma = iv_dma;
1354 edesc->sec4_sg_bytes = sec4_sg_bytes;
1355 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1356 desc_bytes;
1357 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1358 sec4_sg_bytes, DMA_TO_DEVICE);
1359 *all_contig_ptr = all_contig;
1360
a299c837 1361 sec4_sg_index = 0;
1acebad3 1362 if (!all_contig) {
1363 sg_to_sec4_sg(req->assoc,
1364 (assoc_nents ? : 1),
1365 edesc->sec4_sg +
1366 sec4_sg_index, 0);
1367 sec4_sg_index += assoc_nents ? : 1;
1368 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1acebad3 1369 iv_dma, ivsize, 0);
1370 sec4_sg_index += 1;
1371 sg_to_sec4_sg_last(req->src,
1372 (src_nents ? : 1),
1373 edesc->sec4_sg +
1374 sec4_sg_index, 0);
1375 sec4_sg_index += src_nents ? : 1;
1376 }
1377 if (dst_nents) {
1378 sg_to_sec4_sg_last(req->dst, dst_nents,
1379 edesc->sec4_sg + sec4_sg_index, 0);
1acebad3 1380 }
1381
1382 return edesc;
1383}
1384
0e479300 1385static int aead_encrypt(struct aead_request *req)
8e8ec596 1386{
1387 struct aead_edesc *edesc;
1388 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1389 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1390 struct device *jrdev = ctx->jrdev;
1acebad3 1391 bool all_contig;
8e8ec596 1392 u32 *desc;
1393 int ret = 0;
1394
8e8ec596 1395 /* allocate extended descriptor */
1acebad3 1396 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
bbf9c893 1397 CAAM_CMD_SZ, &all_contig, true);
1398 if (IS_ERR(edesc))
1399 return PTR_ERR(edesc);
1400
1401 /* Create and submit job descriptor */
1402 init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1403 all_contig, true);
1404#ifdef DEBUG
514df281 1405 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1406 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1407 desc_bytes(edesc->hw_desc), 1);
1408#endif
8e8ec596 1409
1410 desc = edesc->hw_desc;
1411 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1412 if (!ret) {
1413 ret = -EINPROGRESS;
1414 } else {
1415 aead_unmap(jrdev, edesc, req);
1416 kfree(edesc);
1417 }
8e8ec596 1418
1acebad3 1419 return ret;
1420}
1421
0e479300 1422static int aead_decrypt(struct aead_request *req)
8e8ec596 1423{
1acebad3 1424 struct aead_edesc *edesc;
8e8ec596 1425 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1426 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1427 struct device *jrdev = ctx->jrdev;
1acebad3 1428 bool all_contig;
8e8ec596 1429 u32 *desc;
1acebad3 1430 int ret = 0;
1431
1432 /* allocate extended descriptor */
1acebad3 1433 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
bbf9c893 1434 CAAM_CMD_SZ, &all_contig, false);
1435 if (IS_ERR(edesc))
1436 return PTR_ERR(edesc);
1437
1acebad3 1438#ifdef DEBUG
514df281 1439 print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
1440 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1441 req->cryptlen, 1);
1442#endif
1443
1444 /* Create and submit job descriptor */
1445 init_aead_job(ctx->sh_desc_dec,
1446 ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1447#ifdef DEBUG
514df281 1448 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1449 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1450 desc_bytes(edesc->hw_desc), 1);
1451#endif
1452
8e8ec596 1453 desc = edesc->hw_desc;
1454 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1455 if (!ret) {
1456 ret = -EINPROGRESS;
1457 } else {
1458 aead_unmap(jrdev, edesc, req);
1459 kfree(edesc);
1460 }
8e8ec596 1461
1462 return ret;
1463}
8e8ec596 1464
1465/*
1466 * allocate and map the aead extended descriptor for aead givencrypt
1467 */
1468static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1469 *greq, int desc_bytes,
1470 u32 *contig_ptr)
1471{
1472 struct aead_request *req = &greq->areq;
1473 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1474 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1475 struct device *jrdev = ctx->jrdev;
1476 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1477 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1478 int assoc_nents, src_nents, dst_nents = 0;
1479 struct aead_edesc *edesc;
1480 dma_addr_t iv_dma = 0;
1481 int sgc;
1482 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1483 int ivsize = crypto_aead_ivsize(aead);
643b39b0 1484 bool assoc_chained = false, src_chained = false, dst_chained = false;
a299c837 1485 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
8e8ec596 1486
1487 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1488 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
8e8ec596 1489
1acebad3 1490 if (unlikely(req->dst != req->src))
1491 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
1492 &dst_chained);
1acebad3 1493
643b39b0 1494 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
286233e6 1495 DMA_TO_DEVICE, assoc_chained);
1acebad3 1496 if (likely(req->src == req->dst)) {
1497 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1498 DMA_BIDIRECTIONAL, src_chained);
1acebad3 1499 } else {
1500 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1501 DMA_TO_DEVICE, src_chained);
1502 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1503 DMA_FROM_DEVICE, dst_chained);
1504 }
1505
1506 /* Check if data are contiguous */
1507 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1508 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1509 iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1510 contig &= ~GIV_SRC_CONTIG;
1511 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1512 contig &= ~GIV_DST_CONTIG;
1513 if (unlikely(req->src != req->dst)) {
1514 dst_nents = dst_nents ? : 1;
1515 sec4_sg_len += 1;
1516 }
1517 if (!(contig & GIV_SRC_CONTIG)) {
1518 assoc_nents = assoc_nents ? : 1;
1519 src_nents = src_nents ? : 1;
a299c837 1520 sec4_sg_len += assoc_nents + 1 + src_nents;
1521 if (likely(req->src == req->dst))
1522 contig &= ~GIV_DST_CONTIG;
1523 }
a299c837 1524 sec4_sg_len += dst_nents;
1acebad3 1525
a299c837 1526 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1527
1528 /* allocate space for base edesc and hw desc commands, link tables */
1529 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
a299c837 1530 sec4_sg_bytes, GFP_DMA | flags);
1531 if (!edesc) {
1532 dev_err(jrdev, "could not allocate extended descriptor\n");
1533 return ERR_PTR(-ENOMEM);
1534 }
1535
1536 edesc->assoc_nents = assoc_nents;
643b39b0 1537 edesc->assoc_chained = assoc_chained;
1acebad3 1538 edesc->src_nents = src_nents;
643b39b0 1539 edesc->src_chained = src_chained;
1acebad3 1540 edesc->dst_nents = dst_nents;
643b39b0 1541 edesc->dst_chained = dst_chained;
1acebad3 1542 edesc->iv_dma = iv_dma;
1543 edesc->sec4_sg_bytes = sec4_sg_bytes;
1544 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1545 desc_bytes;
1546 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1547 sec4_sg_bytes, DMA_TO_DEVICE);
1548 *contig_ptr = contig;
1549
a299c837 1550 sec4_sg_index = 0;
1acebad3 1551 if (!(contig & GIV_SRC_CONTIG)) {
1552 sg_to_sec4_sg(req->assoc, assoc_nents,
1553 edesc->sec4_sg +
1554 sec4_sg_index, 0);
1555 sec4_sg_index += assoc_nents;
1556 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1acebad3 1557 iv_dma, ivsize, 0);
1558 sec4_sg_index += 1;
1559 sg_to_sec4_sg_last(req->src, src_nents,
1560 edesc->sec4_sg +
1561 sec4_sg_index, 0);
1562 sec4_sg_index += src_nents;
1563 }
1564 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
a299c837 1565 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1acebad3 1566 iv_dma, ivsize, 0);
1567 sec4_sg_index += 1;
1568 sg_to_sec4_sg_last(req->dst, dst_nents,
1569 edesc->sec4_sg + sec4_sg_index, 0);
1570 }
1571
1572 return edesc;
1573}
1574
0e479300 1575static int aead_givencrypt(struct aead_givcrypt_request *areq)
8e8ec596 1576{
1577 struct aead_request *req = &areq->areq;
1578 struct aead_edesc *edesc;
1579 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1580 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1581 struct device *jrdev = ctx->jrdev;
1acebad3 1582 u32 contig;
8e8ec596 1583 u32 *desc;
1acebad3 1584 int ret = 0;
8e8ec596 1585
8e8ec596 1586 /* allocate extended descriptor */
1587 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1588 CAAM_CMD_SZ, &contig);
1589
1590 if (IS_ERR(edesc))
1591 return PTR_ERR(edesc);
1592
1acebad3 1593#ifdef DEBUG
514df281 1594 print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
1acebad3
YK
1595 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1596 req->cryptlen, 1);
1597#endif
8e8ec596 1598
1acebad3
YK
1599	/* Create and submit job descriptor */
1600 init_aead_giv_job(ctx->sh_desc_givenc,
1601 ctx->sh_desc_givenc_dma, edesc, req, contig);
1602#ifdef DEBUG
514df281 1603 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1acebad3
YK
1604 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1605 desc_bytes(edesc->hw_desc), 1);
1606#endif
8e8ec596 1607
1acebad3
YK
1608 desc = edesc->hw_desc;
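	/*
	 * caam_jr_enqueue() returning 0 means the job was accepted by the
	 * ring and completes asynchronously through aead_encrypt_done(),
	 * so -EINPROGRESS is reported per the crypto API convention.  A
	 * non-zero return means the job never reached hardware, so the
	 * extended descriptor is unmapped and freed here.
	 */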
1609 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1610 if (!ret) {
1611 ret = -EINPROGRESS;
1612 } else {
1613 aead_unmap(jrdev, edesc, req);
1614 kfree(edesc);
1615 }
8e8ec596 1616
1acebad3 1617 return ret;
8e8ec596
KP
1618}
1619
ae4a825f
HG
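/*
 * cipher_null uses a zero-length IV (NULL_IV_SIZE), so there is no IV to
 * generate; givencrypt simply falls through to the normal encrypt path.
 */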
1620static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
1621{
1622 return aead_encrypt(&areq->areq);
1623}
1624
acdca31d
YK
1625/*
1626 * allocate and map the extended descriptor for ablkcipher
1627 */
1628static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1629 *req, int desc_bytes,
1630 bool *iv_contig_out)
1631{
1632 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1633 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1634 struct device *jrdev = ctx->jrdev;
1635 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1636 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1637 GFP_KERNEL : GFP_ATOMIC;
a299c837 1638 int src_nents, dst_nents = 0, sec4_sg_bytes;
acdca31d
YK
1639 struct ablkcipher_edesc *edesc;
1640 dma_addr_t iv_dma = 0;
1641 bool iv_contig = false;
1642 int sgc;
1643 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
643b39b0 1644 bool src_chained = false, dst_chained = false;
a299c837 1645 int sec4_sg_index;
acdca31d 1646
643b39b0 1647 src_nents = sg_count(req->src, req->nbytes, &src_chained);
acdca31d 1648
643b39b0
YK
1649 if (req->dst != req->src)
1650 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
acdca31d
YK
1651
1652 if (likely(req->src == req->dst)) {
643b39b0
YK
1653 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1654 DMA_BIDIRECTIONAL, src_chained);
acdca31d 1655 } else {
643b39b0
YK
1656 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1657 DMA_TO_DEVICE, src_chained);
1658 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1659 DMA_FROM_DEVICE, dst_chained);
acdca31d
YK
1660 }
1661
1662 /*
1663	 * Check if the IV can be contiguous with the source. If so,
1664	 * reference it directly; if not, build a scatter/gather table.
1665 */
1666 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1667 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1668 iv_contig = true;
1669 else
1670 src_nents = src_nents ? : 1;
a299c837
YK
1671 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1672 sizeof(struct sec4_sg_entry);
acdca31d
YK
1673
1674 /* allocate space for base edesc and hw desc commands, link tables */
1675 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
a299c837 1676 sec4_sg_bytes, GFP_DMA | flags);
acdca31d
YK
1677 if (!edesc) {
1678 dev_err(jrdev, "could not allocate extended descriptor\n");
1679 return ERR_PTR(-ENOMEM);
1680 }
1681
1682 edesc->src_nents = src_nents;
643b39b0 1683 edesc->src_chained = src_chained;
acdca31d 1684 edesc->dst_nents = dst_nents;
643b39b0 1685 edesc->dst_chained = dst_chained;
a299c837
YK
1686 edesc->sec4_sg_bytes = sec4_sg_bytes;
1687 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1688 desc_bytes;
acdca31d 1689
a299c837 1690 sec4_sg_index = 0;
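	/*
	 * S/G table layout built below: when the IV is not contiguous with
	 * the source, entry 0 carries the IV and the next src_nents entries
	 * carry the source (last one flagged); destination entries, if any,
	 * follow starting at sec4_sg_index.
	 */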
acdca31d 1691 if (!iv_contig) {
a299c837
YK
1692 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1693 sg_to_sec4_sg_last(req->src, src_nents,
1694 edesc->sec4_sg + 1, 0);
1695 sec4_sg_index += 1 + src_nents;
acdca31d
YK
1696 }
1697
643b39b0 1698 if (dst_nents) {
a299c837
YK
1699 sg_to_sec4_sg_last(req->dst, dst_nents,
1700 edesc->sec4_sg + sec4_sg_index, 0);
acdca31d
YK
1701 }
1702
a299c837
YK
1703 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1704 sec4_sg_bytes, DMA_TO_DEVICE);
acdca31d
YK
1705 edesc->iv_dma = iv_dma;
1706
1707#ifdef DEBUG
514df281 1708 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
a299c837
YK
1709 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1710 sec4_sg_bytes, 1);
acdca31d
YK
1711#endif
1712
1713 *iv_contig_out = iv_contig;
1714 return edesc;
1715}
1716
1717static int ablkcipher_encrypt(struct ablkcipher_request *req)
1718{
1719 struct ablkcipher_edesc *edesc;
1720 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1721 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1722 struct device *jrdev = ctx->jrdev;
1723 bool iv_contig;
1724 u32 *desc;
1725 int ret = 0;
1726
1727 /* allocate extended descriptor */
1728 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1729 CAAM_CMD_SZ, &iv_contig);
1730 if (IS_ERR(edesc))
1731 return PTR_ERR(edesc);
1732
1733	/* Create and submit job descriptor */
1734 init_ablkcipher_job(ctx->sh_desc_enc,
1735 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1736#ifdef DEBUG
514df281 1737 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
acdca31d
YK
1738 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1739 desc_bytes(edesc->hw_desc), 1);
1740#endif
1741 desc = edesc->hw_desc;
1742 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1743
1744 if (!ret) {
1745 ret = -EINPROGRESS;
1746 } else {
1747 ablkcipher_unmap(jrdev, edesc, req);
1748 kfree(edesc);
1749 }
1750
1751 return ret;
1752}
1753
1754static int ablkcipher_decrypt(struct ablkcipher_request *req)
1755{
1756 struct ablkcipher_edesc *edesc;
1757 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1758 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1759 struct device *jrdev = ctx->jrdev;
1760 bool iv_contig;
1761 u32 *desc;
1762 int ret = 0;
1763
1764 /* allocate extended descriptor */
1765 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1766 CAAM_CMD_SZ, &iv_contig);
1767 if (IS_ERR(edesc))
1768 return PTR_ERR(edesc);
1769
1771	/* Create and submit job descriptor */
1771 init_ablkcipher_job(ctx->sh_desc_dec,
1772 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1773 desc = edesc->hw_desc;
1774#ifdef DEBUG
514df281 1775 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
acdca31d
YK
1776 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1777 desc_bytes(edesc->hw_desc), 1);
1778#endif
1779
1780 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1781 if (!ret) {
1782 ret = -EINPROGRESS;
1783 } else {
1784 ablkcipher_unmap(jrdev, edesc, req);
1785 kfree(edesc);
1786 }
1787
1788 return ret;
1789}
1790
885e9e2f 1791#define template_aead template_u.aead
acdca31d 1792#define template_ablkcipher template_u.ablkcipher
8e8ec596
KP
1793struct caam_alg_template {
1794 char name[CRYPTO_MAX_ALG_NAME];
1795 char driver_name[CRYPTO_MAX_ALG_NAME];
1796 unsigned int blocksize;
885e9e2f
YK
1797 u32 type;
1798 union {
1799 struct ablkcipher_alg ablkcipher;
1800 struct aead_alg aead;
1801 struct blkcipher_alg blkcipher;
1802 struct cipher_alg cipher;
1803 struct compress_alg compress;
1804 struct rng_alg rng;
1805 } template_u;
8e8ec596
KP
1806 u32 class1_alg_type;
1807 u32 class2_alg_type;
1808 u32 alg_op;
1809};
1810
1811static struct caam_alg_template driver_algs[] = {
246bbedb 1812 /* single-pass ipsec_esp descriptor */
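	/*
	 * The ecb(cipher_null) entries that follow implement AEAD null
	 * encryption: .class1_alg_type is 0 because no class-1 cipher
	 * operation is programmed, only the class-2 HMAC authentication.
	 */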
ae4a825f
HG
1813 {
1814 .name = "authenc(hmac(md5),ecb(cipher_null))",
1815 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
1816 .blocksize = NULL_BLOCK_SIZE,
1817 .type = CRYPTO_ALG_TYPE_AEAD,
1818 .template_aead = {
1819 .setkey = aead_setkey,
1820 .setauthsize = aead_setauthsize,
1821 .encrypt = aead_encrypt,
1822 .decrypt = aead_decrypt,
1823 .givencrypt = aead_null_givencrypt,
1824 .geniv = "<built-in>",
1825 .ivsize = NULL_IV_SIZE,
1826 .maxauthsize = MD5_DIGEST_SIZE,
1827 },
1828 .class1_alg_type = 0,
1829 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1830 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1831 },
1832 {
1833 .name = "authenc(hmac(sha1),ecb(cipher_null))",
1834 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
1835 .blocksize = NULL_BLOCK_SIZE,
1836 .type = CRYPTO_ALG_TYPE_AEAD,
1837 .template_aead = {
1838 .setkey = aead_setkey,
1839 .setauthsize = aead_setauthsize,
1840 .encrypt = aead_encrypt,
1841 .decrypt = aead_decrypt,
1842 .givencrypt = aead_null_givencrypt,
1843 .geniv = "<built-in>",
1844 .ivsize = NULL_IV_SIZE,
1845 .maxauthsize = SHA1_DIGEST_SIZE,
1846 },
1847 .class1_alg_type = 0,
1848 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1849 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1850 },
1851 {
1852 .name = "authenc(hmac(sha224),ecb(cipher_null))",
1853 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
1854 .blocksize = NULL_BLOCK_SIZE,
1855 .type = CRYPTO_ALG_TYPE_AEAD,
1856 .template_aead = {
1857 .setkey = aead_setkey,
1858 .setauthsize = aead_setauthsize,
1859 .encrypt = aead_encrypt,
1860 .decrypt = aead_decrypt,
1861 .givencrypt = aead_null_givencrypt,
1862 .geniv = "<built-in>",
1863 .ivsize = NULL_IV_SIZE,
1864 .maxauthsize = SHA224_DIGEST_SIZE,
1865 },
1866 .class1_alg_type = 0,
1867 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1868 OP_ALG_AAI_HMAC_PRECOMP,
1869 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1870 },
1871 {
1872 .name = "authenc(hmac(sha256),ecb(cipher_null))",
1873 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
1874 .blocksize = NULL_BLOCK_SIZE,
1875 .type = CRYPTO_ALG_TYPE_AEAD,
1876 .template_aead = {
1877 .setkey = aead_setkey,
1878 .setauthsize = aead_setauthsize,
1879 .encrypt = aead_encrypt,
1880 .decrypt = aead_decrypt,
1881 .givencrypt = aead_null_givencrypt,
1882 .geniv = "<built-in>",
1883 .ivsize = NULL_IV_SIZE,
1884 .maxauthsize = SHA256_DIGEST_SIZE,
1885 },
1886 .class1_alg_type = 0,
1887 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1888 OP_ALG_AAI_HMAC_PRECOMP,
1889 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1890 },
1891 {
1892 .name = "authenc(hmac(sha384),ecb(cipher_null))",
1893 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
1894 .blocksize = NULL_BLOCK_SIZE,
1895 .type = CRYPTO_ALG_TYPE_AEAD,
1896 .template_aead = {
1897 .setkey = aead_setkey,
1898 .setauthsize = aead_setauthsize,
1899 .encrypt = aead_encrypt,
1900 .decrypt = aead_decrypt,
1901 .givencrypt = aead_null_givencrypt,
1902 .geniv = "<built-in>",
1903 .ivsize = NULL_IV_SIZE,
1904 .maxauthsize = SHA384_DIGEST_SIZE,
1905 },
1906 .class1_alg_type = 0,
1907 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1908 OP_ALG_AAI_HMAC_PRECOMP,
1909 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1910 },
1911 {
1912 .name = "authenc(hmac(sha512),ecb(cipher_null))",
1913 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
1914 .blocksize = NULL_BLOCK_SIZE,
1915 .type = CRYPTO_ALG_TYPE_AEAD,
1916 .template_aead = {
1917 .setkey = aead_setkey,
1918 .setauthsize = aead_setauthsize,
1919 .encrypt = aead_encrypt,
1920 .decrypt = aead_decrypt,
1921 .givencrypt = aead_null_givencrypt,
1922 .geniv = "<built-in>",
1923 .ivsize = NULL_IV_SIZE,
1924 .maxauthsize = SHA512_DIGEST_SIZE,
1925 },
1926 .class1_alg_type = 0,
1927 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1928 OP_ALG_AAI_HMAC_PRECOMP,
1929 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1930 },
8b4d43a4
KP
1931 {
1932 .name = "authenc(hmac(md5),cbc(aes))",
1933 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1934 .blocksize = AES_BLOCK_SIZE,
1935 .type = CRYPTO_ALG_TYPE_AEAD,
1936 .template_aead = {
1937 .setkey = aead_setkey,
1938 .setauthsize = aead_setauthsize,
1939 .encrypt = aead_encrypt,
1940 .decrypt = aead_decrypt,
1941 .givencrypt = aead_givencrypt,
1942 .geniv = "<built-in>",
1943 .ivsize = AES_BLOCK_SIZE,
1944 .maxauthsize = MD5_DIGEST_SIZE,
1945 },
1946 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1947 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1948 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1949 },
8e8ec596
KP
1950 {
1951 .name = "authenc(hmac(sha1),cbc(aes))",
1952 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1953 .blocksize = AES_BLOCK_SIZE,
885e9e2f
YK
1954 .type = CRYPTO_ALG_TYPE_AEAD,
1955 .template_aead = {
0e479300
YK
1956 .setkey = aead_setkey,
1957 .setauthsize = aead_setauthsize,
1958 .encrypt = aead_encrypt,
1959 .decrypt = aead_decrypt,
1960 .givencrypt = aead_givencrypt,
8e8ec596
KP
1961 .geniv = "<built-in>",
1962 .ivsize = AES_BLOCK_SIZE,
1963 .maxauthsize = SHA1_DIGEST_SIZE,
1964 },
1965 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1966 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1967 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1968 },
e863f9cc
HA
1969 {
1970 .name = "authenc(hmac(sha224),cbc(aes))",
1971 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1972 .blocksize = AES_BLOCK_SIZE,
cb7d5662 1973 .type = CRYPTO_ALG_TYPE_AEAD,
e863f9cc
HA
1974 .template_aead = {
1975 .setkey = aead_setkey,
1976 .setauthsize = aead_setauthsize,
1977 .encrypt = aead_encrypt,
1978 .decrypt = aead_decrypt,
1979 .givencrypt = aead_givencrypt,
1980 .geniv = "<built-in>",
1981 .ivsize = AES_BLOCK_SIZE,
1982 .maxauthsize = SHA224_DIGEST_SIZE,
1983 },
1984 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1985 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1986 OP_ALG_AAI_HMAC_PRECOMP,
1987 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1988 },
8e8ec596
KP
1989 {
1990 .name = "authenc(hmac(sha256),cbc(aes))",
1991 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1992 .blocksize = AES_BLOCK_SIZE,
885e9e2f
YK
1993 .type = CRYPTO_ALG_TYPE_AEAD,
1994 .template_aead = {
0e479300
YK
1995 .setkey = aead_setkey,
1996 .setauthsize = aead_setauthsize,
1997 .encrypt = aead_encrypt,
1998 .decrypt = aead_decrypt,
1999 .givencrypt = aead_givencrypt,
8e8ec596
KP
2000 .geniv = "<built-in>",
2001 .ivsize = AES_BLOCK_SIZE,
2002 .maxauthsize = SHA256_DIGEST_SIZE,
2003 },
2004 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2005 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2006 OP_ALG_AAI_HMAC_PRECOMP,
2007 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2008 },
e863f9cc
HA
2009 {
2010 .name = "authenc(hmac(sha384),cbc(aes))",
2011 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
2012 .blocksize = AES_BLOCK_SIZE,
cb7d5662 2013 .type = CRYPTO_ALG_TYPE_AEAD,
e863f9cc
HA
2014 .template_aead = {
2015 .setkey = aead_setkey,
2016 .setauthsize = aead_setauthsize,
2017 .encrypt = aead_encrypt,
2018 .decrypt = aead_decrypt,
2019 .givencrypt = aead_givencrypt,
2020 .geniv = "<built-in>",
2021 .ivsize = AES_BLOCK_SIZE,
2022 .maxauthsize = SHA384_DIGEST_SIZE,
2023 },
2024 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2025 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2026 OP_ALG_AAI_HMAC_PRECOMP,
2027 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2028 },
2029
4427b1b4
KP
2030 {
2031 .name = "authenc(hmac(sha512),cbc(aes))",
2032 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
2033 .blocksize = AES_BLOCK_SIZE,
885e9e2f
YK
2034 .type = CRYPTO_ALG_TYPE_AEAD,
2035 .template_aead = {
0e479300
YK
2036 .setkey = aead_setkey,
2037 .setauthsize = aead_setauthsize,
2038 .encrypt = aead_encrypt,
2039 .decrypt = aead_decrypt,
2040 .givencrypt = aead_givencrypt,
4427b1b4
KP
2041 .geniv = "<built-in>",
2042 .ivsize = AES_BLOCK_SIZE,
2043 .maxauthsize = SHA512_DIGEST_SIZE,
2044 },
2045 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2046 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2047 OP_ALG_AAI_HMAC_PRECOMP,
2048 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2049 },
8b4d43a4
KP
2050 {
2051 .name = "authenc(hmac(md5),cbc(des3_ede))",
2052 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
2053 .blocksize = DES3_EDE_BLOCK_SIZE,
2054 .type = CRYPTO_ALG_TYPE_AEAD,
2055 .template_aead = {
2056 .setkey = aead_setkey,
2057 .setauthsize = aead_setauthsize,
2058 .encrypt = aead_encrypt,
2059 .decrypt = aead_decrypt,
2060 .givencrypt = aead_givencrypt,
2061 .geniv = "<built-in>",
2062 .ivsize = DES3_EDE_BLOCK_SIZE,
2063 .maxauthsize = MD5_DIGEST_SIZE,
2064 },
2065 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2066 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2067 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2068 },
8e8ec596
KP
2069 {
2070 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2071 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
2072 .blocksize = DES3_EDE_BLOCK_SIZE,
885e9e2f
YK
2073 .type = CRYPTO_ALG_TYPE_AEAD,
2074 .template_aead = {
0e479300
YK
2075 .setkey = aead_setkey,
2076 .setauthsize = aead_setauthsize,
2077 .encrypt = aead_encrypt,
2078 .decrypt = aead_decrypt,
2079 .givencrypt = aead_givencrypt,
8e8ec596
KP
2080 .geniv = "<built-in>",
2081 .ivsize = DES3_EDE_BLOCK_SIZE,
2082 .maxauthsize = SHA1_DIGEST_SIZE,
2083 },
2084 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2085 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2086 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2087 },
e863f9cc
HA
2088 {
2089 .name = "authenc(hmac(sha224),cbc(des3_ede))",
2090 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
2091 .blocksize = DES3_EDE_BLOCK_SIZE,
cb7d5662 2092 .type = CRYPTO_ALG_TYPE_AEAD,
e863f9cc
HA
2093 .template_aead = {
2094 .setkey = aead_setkey,
2095 .setauthsize = aead_setauthsize,
2096 .encrypt = aead_encrypt,
2097 .decrypt = aead_decrypt,
2098 .givencrypt = aead_givencrypt,
2099 .geniv = "<built-in>",
2100 .ivsize = DES3_EDE_BLOCK_SIZE,
2101 .maxauthsize = SHA224_DIGEST_SIZE,
2102 },
2103 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2104 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2105 OP_ALG_AAI_HMAC_PRECOMP,
2106 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2107 },
8e8ec596
KP
2108 {
2109 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2110 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
2111 .blocksize = DES3_EDE_BLOCK_SIZE,
885e9e2f
YK
2112 .type = CRYPTO_ALG_TYPE_AEAD,
2113 .template_aead = {
0e479300
YK
2114 .setkey = aead_setkey,
2115 .setauthsize = aead_setauthsize,
2116 .encrypt = aead_encrypt,
2117 .decrypt = aead_decrypt,
2118 .givencrypt = aead_givencrypt,
8e8ec596
KP
2119 .geniv = "<built-in>",
2120 .ivsize = DES3_EDE_BLOCK_SIZE,
2121 .maxauthsize = SHA256_DIGEST_SIZE,
2122 },
2123 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2124 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2125 OP_ALG_AAI_HMAC_PRECOMP,
2126 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2127 },
e863f9cc
HA
2128 {
2129 .name = "authenc(hmac(sha384),cbc(des3_ede))",
2130 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
2131 .blocksize = DES3_EDE_BLOCK_SIZE,
cb7d5662 2132 .type = CRYPTO_ALG_TYPE_AEAD,
e863f9cc
HA
2133 .template_aead = {
2134 .setkey = aead_setkey,
2135 .setauthsize = aead_setauthsize,
2136 .encrypt = aead_encrypt,
2137 .decrypt = aead_decrypt,
2138 .givencrypt = aead_givencrypt,
2139 .geniv = "<built-in>",
2140 .ivsize = DES3_EDE_BLOCK_SIZE,
2141 .maxauthsize = SHA384_DIGEST_SIZE,
2142 },
2143 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2144 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2145 OP_ALG_AAI_HMAC_PRECOMP,
2146 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2147 },
4427b1b4
KP
2148 {
2149 .name = "authenc(hmac(sha512),cbc(des3_ede))",
2150 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
2151 .blocksize = DES3_EDE_BLOCK_SIZE,
885e9e2f
YK
2152 .type = CRYPTO_ALG_TYPE_AEAD,
2153 .template_aead = {
0e479300
YK
2154 .setkey = aead_setkey,
2155 .setauthsize = aead_setauthsize,
2156 .encrypt = aead_encrypt,
2157 .decrypt = aead_decrypt,
2158 .givencrypt = aead_givencrypt,
4427b1b4
KP
2159 .geniv = "<built-in>",
2160 .ivsize = DES3_EDE_BLOCK_SIZE,
2161 .maxauthsize = SHA512_DIGEST_SIZE,
2162 },
2163 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2164 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2165 OP_ALG_AAI_HMAC_PRECOMP,
2166 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2167 },
8b4d43a4
KP
2168 {
2169 .name = "authenc(hmac(md5),cbc(des))",
2170 .driver_name = "authenc-hmac-md5-cbc-des-caam",
2171 .blocksize = DES_BLOCK_SIZE,
2172 .type = CRYPTO_ALG_TYPE_AEAD,
2173 .template_aead = {
2174 .setkey = aead_setkey,
2175 .setauthsize = aead_setauthsize,
2176 .encrypt = aead_encrypt,
2177 .decrypt = aead_decrypt,
2178 .givencrypt = aead_givencrypt,
2179 .geniv = "<built-in>",
2180 .ivsize = DES_BLOCK_SIZE,
2181 .maxauthsize = MD5_DIGEST_SIZE,
2182 },
2183 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2184 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2185 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2186 },
8e8ec596
KP
2187 {
2188 .name = "authenc(hmac(sha1),cbc(des))",
2189 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
2190 .blocksize = DES_BLOCK_SIZE,
885e9e2f
YK
2191 .type = CRYPTO_ALG_TYPE_AEAD,
2192 .template_aead = {
0e479300
YK
2193 .setkey = aead_setkey,
2194 .setauthsize = aead_setauthsize,
2195 .encrypt = aead_encrypt,
2196 .decrypt = aead_decrypt,
2197 .givencrypt = aead_givencrypt,
8e8ec596
KP
2198 .geniv = "<built-in>",
2199 .ivsize = DES_BLOCK_SIZE,
2200 .maxauthsize = SHA1_DIGEST_SIZE,
2201 },
2202 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2203 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2204 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2205 },
e863f9cc
HA
2206 {
2207 .name = "authenc(hmac(sha224),cbc(des))",
2208 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
2209 .blocksize = DES_BLOCK_SIZE,
cb7d5662 2210 .type = CRYPTO_ALG_TYPE_AEAD,
e863f9cc
HA
2211 .template_aead = {
2212 .setkey = aead_setkey,
2213 .setauthsize = aead_setauthsize,
2214 .encrypt = aead_encrypt,
2215 .decrypt = aead_decrypt,
2216 .givencrypt = aead_givencrypt,
2217 .geniv = "<built-in>",
2218 .ivsize = DES_BLOCK_SIZE,
2219 .maxauthsize = SHA224_DIGEST_SIZE,
2220 },
2221 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2222 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2223 OP_ALG_AAI_HMAC_PRECOMP,
2224 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2225 },
8e8ec596
KP
2226 {
2227 .name = "authenc(hmac(sha256),cbc(des))",
2228 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
2229 .blocksize = DES_BLOCK_SIZE,
885e9e2f
YK
2230 .type = CRYPTO_ALG_TYPE_AEAD,
2231 .template_aead = {
0e479300
YK
2232 .setkey = aead_setkey,
2233 .setauthsize = aead_setauthsize,
2234 .encrypt = aead_encrypt,
2235 .decrypt = aead_decrypt,
2236 .givencrypt = aead_givencrypt,
8e8ec596
KP
2237 .geniv = "<built-in>",
2238 .ivsize = DES_BLOCK_SIZE,
2239 .maxauthsize = SHA256_DIGEST_SIZE,
2240 },
2241 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2242 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2243 OP_ALG_AAI_HMAC_PRECOMP,
2244 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2245 },
e863f9cc
HA
2246 {
2247 .name = "authenc(hmac(sha384),cbc(des))",
2248 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
2249 .blocksize = DES_BLOCK_SIZE,
cb7d5662 2250 .type = CRYPTO_ALG_TYPE_AEAD,
e863f9cc
HA
2251 .template_aead = {
2252 .setkey = aead_setkey,
2253 .setauthsize = aead_setauthsize,
2254 .encrypt = aead_encrypt,
2255 .decrypt = aead_decrypt,
2256 .givencrypt = aead_givencrypt,
2257 .geniv = "<built-in>",
2258 .ivsize = DES_BLOCK_SIZE,
2259 .maxauthsize = SHA384_DIGEST_SIZE,
2260 },
2261 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2262 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2263 OP_ALG_AAI_HMAC_PRECOMP,
2264 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2265 },
4427b1b4
KP
2266 {
2267 .name = "authenc(hmac(sha512),cbc(des))",
2268 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
2269 .blocksize = DES_BLOCK_SIZE,
885e9e2f
YK
2270 .type = CRYPTO_ALG_TYPE_AEAD,
2271 .template_aead = {
0e479300
YK
2272 .setkey = aead_setkey,
2273 .setauthsize = aead_setauthsize,
2274 .encrypt = aead_encrypt,
2275 .decrypt = aead_decrypt,
2276 .givencrypt = aead_givencrypt,
4427b1b4
KP
2277 .geniv = "<built-in>",
2278 .ivsize = DES_BLOCK_SIZE,
2279 .maxauthsize = SHA512_DIGEST_SIZE,
2280 },
2281 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2282 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2283 OP_ALG_AAI_HMAC_PRECOMP,
2284 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2285 },
acdca31d
YK
2286 /* ablkcipher descriptor */
2287 {
2288 .name = "cbc(aes)",
2289 .driver_name = "cbc-aes-caam",
2290 .blocksize = AES_BLOCK_SIZE,
2291 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2292 .template_ablkcipher = {
2293 .setkey = ablkcipher_setkey,
2294 .encrypt = ablkcipher_encrypt,
2295 .decrypt = ablkcipher_decrypt,
2296 .geniv = "eseqiv",
2297 .min_keysize = AES_MIN_KEY_SIZE,
2298 .max_keysize = AES_MAX_KEY_SIZE,
2299 .ivsize = AES_BLOCK_SIZE,
2300 },
2301 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2302 },
2303 {
2304 .name = "cbc(des3_ede)",
2305 .driver_name = "cbc-3des-caam",
2306 .blocksize = DES3_EDE_BLOCK_SIZE,
2307 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2308 .template_ablkcipher = {
2309 .setkey = ablkcipher_setkey,
2310 .encrypt = ablkcipher_encrypt,
2311 .decrypt = ablkcipher_decrypt,
2312 .geniv = "eseqiv",
2313 .min_keysize = DES3_EDE_KEY_SIZE,
2314 .max_keysize = DES3_EDE_KEY_SIZE,
2315 .ivsize = DES3_EDE_BLOCK_SIZE,
2316 },
2317 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2318 },
2319 {
2320 .name = "cbc(des)",
2321 .driver_name = "cbc-des-caam",
2322 .blocksize = DES_BLOCK_SIZE,
2323 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2324 .template_ablkcipher = {
2325 .setkey = ablkcipher_setkey,
2326 .encrypt = ablkcipher_encrypt,
2327 .decrypt = ablkcipher_decrypt,
2328 .geniv = "eseqiv",
2329 .min_keysize = DES_KEY_SIZE,
2330 .max_keysize = DES_KEY_SIZE,
2331 .ivsize = DES_BLOCK_SIZE,
2332 },
2333 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2334 }
8e8ec596
KP
2335};
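/*
 * Illustrative sketch (not part of this driver): how a kernel-side user
 * might exercise one of the null-encryption AEADs registered above through
 * the AEAD API of this era.  The key blob, scatterlists and completion
 * callback (authenc_key, assoc_sg, src_sg, dst_sg, my_complete, my_ctx) are
 * hypothetical placeholders; authenc() keys must be packed in the
 * RTA-encoded authenc format expected by aead_setkey().
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *	int ret;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),ecb(cipher_null))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	ret = crypto_aead_setkey(tfm, authenc_key, authenc_keylen);
 *	if (!ret)
 *		ret = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  my_complete, &my_ctx);
 *	aead_request_set_assoc(req, assoc_sg, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, NULL);
 *	ret = crypto_aead_encrypt(req);	 (returns -EINPROGRESS when queued)
 */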
2336
2337struct caam_crypto_alg {
2338 struct list_head entry;
8e8ec596
KP
2339 int class1_alg_type;
2340 int class2_alg_type;
2341 int alg_op;
2342 struct crypto_alg crypto_alg;
2343};
2344
2345static int caam_cra_init(struct crypto_tfm *tfm)
2346{
2347 struct crypto_alg *alg = tfm->__crt_alg;
2348 struct caam_crypto_alg *caam_alg =
2349 container_of(alg, struct caam_crypto_alg, crypto_alg);
2350 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
8e8ec596 2351
cfc6f11b
RG
2352 ctx->jrdev = caam_jr_alloc();
2353 if (IS_ERR(ctx->jrdev)) {
2354 pr_err("Job Ring Device allocation for transform failed\n");
2355 return PTR_ERR(ctx->jrdev);
2356 }
8e8ec596
KP
2357
2358 /* copy descriptor header template value */
2359 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2360 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2361 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2362
2363 return 0;
2364}
2365
2366static void caam_cra_exit(struct crypto_tfm *tfm)
2367{
2368 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2369
1acebad3
YK
2370 if (ctx->sh_desc_enc_dma &&
2371 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2372 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2373 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2374 if (ctx->sh_desc_dec_dma &&
2375 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2376 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2377 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2378 if (ctx->sh_desc_givenc_dma &&
2379 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2380 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2381 desc_bytes(ctx->sh_desc_givenc),
4427b1b4 2382 DMA_TO_DEVICE);
cfc6f11b
RG
2383
2384 caam_jr_free(ctx->jrdev);
8e8ec596
KP
2385}
2386
2387static void __exit caam_algapi_exit(void)
2388{
2389
8e8ec596 2390 struct caam_crypto_alg *t_alg, *n;
8e8ec596 2391
cfc6f11b 2392 if (!alg_list.next)
8e8ec596
KP
2393 return;
2394
cfc6f11b 2395 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
8e8ec596
KP
2396 crypto_unregister_alg(&t_alg->crypto_alg);
2397 list_del(&t_alg->entry);
2398 kfree(t_alg);
2399 }
8e8ec596
KP
2400}
2401
cfc6f11b 2402static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
8e8ec596
KP
2403 *template)
2404{
2405 struct caam_crypto_alg *t_alg;
2406 struct crypto_alg *alg;
2407
2408 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2409 if (!t_alg) {
cfc6f11b 2410 pr_err("failed to allocate t_alg\n");
8e8ec596
KP
2411 return ERR_PTR(-ENOMEM);
2412 }
2413
2414 alg = &t_alg->crypto_alg;
2415
2416 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2417 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2418 template->driver_name);
2419 alg->cra_module = THIS_MODULE;
2420 alg->cra_init = caam_cra_init;
2421 alg->cra_exit = caam_cra_exit;
2422 alg->cra_priority = CAAM_CRA_PRIORITY;
8e8ec596
KP
2423 alg->cra_blocksize = template->blocksize;
2424 alg->cra_alignmask = 0;
8e8ec596 2425 alg->cra_ctxsize = sizeof(struct caam_ctx);
d912bb76
NM
2426 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2427 template->type;
885e9e2f 2428 switch (template->type) {
acdca31d
YK
2429 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2430 alg->cra_type = &crypto_ablkcipher_type;
2431 alg->cra_ablkcipher = template->template_ablkcipher;
2432 break;
885e9e2f
YK
2433 case CRYPTO_ALG_TYPE_AEAD:
2434 alg->cra_type = &crypto_aead_type;
2435 alg->cra_aead = template->template_aead;
2436 break;
2437 }
8e8ec596
KP
2438
2439 t_alg->class1_alg_type = template->class1_alg_type;
2440 t_alg->class2_alg_type = template->class2_alg_type;
2441 t_alg->alg_op = template->alg_op;
8e8ec596
KP
2442
2443 return t_alg;
2444}
2445
2446static int __init caam_algapi_init(void)
2447{
8e8ec596
KP
2448 int i = 0, err = 0;
2449
cfc6f11b 2450 INIT_LIST_HEAD(&alg_list);
8e8ec596
KP
2451
2452 /* register crypto algorithms the device supports */
2453 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2454 /* TODO: check if h/w supports alg */
2455 struct caam_crypto_alg *t_alg;
2456
cfc6f11b 2457 t_alg = caam_alg_alloc(&driver_algs[i]);
8e8ec596
KP
2458 if (IS_ERR(t_alg)) {
2459 err = PTR_ERR(t_alg);
cfc6f11b
RG
2460 pr_warn("%s alg allocation failed\n",
2461 driver_algs[i].driver_name);
8e8ec596
KP
2462 continue;
2463 }
2464
2465 err = crypto_register_alg(&t_alg->crypto_alg);
2466 if (err) {
cfc6f11b 2467 pr_warn("%s alg registration failed\n",
8e8ec596
KP
2468 t_alg->crypto_alg.cra_driver_name);
2469 kfree(t_alg);
246bbedb 2470 } else
cfc6f11b 2471 list_add_tail(&t_alg->entry, &alg_list);
8e8ec596 2472 }
cfc6f11b
RG
2473 if (!list_empty(&alg_list))
2474 pr_info("caam algorithms registered in /proc/crypto\n");
8e8ec596
KP
2475
2476 return err;
2477}
2478
2479module_init(caam_algapi_init);
2480module_exit(caam_algapi_exit);
2481
2482MODULE_LICENSE("GPL");
2483MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2484MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");