/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
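
/*
 * Illustrative sketch (not part of the driver logic below): using the
 * desc_constr.h helpers seen throughout this file, a job descriptor of
 * the shape above is assembled roughly as:
 *
 *	init_job_desc_shared(desc, sh_desc_dma, sh_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *
 * The variable names here are placeholders, not fields of any structure
 * defined in this file.
 */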

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16
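/*
 * Worked size check (values taken from the generic crypto headers,
 * assuming AES_MAX_KEY_SIZE = 32, CTR_RFC3686_NONCE_SIZE = 4 and
 * SHA512_DIGEST_SIZE = 64): CAAM_MAX_KEY_SIZE = 32 + 4 + 128 = 164 bytes,
 * enough for the largest split key plus encryption key material stored
 * in the per-session context below.
 */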

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
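/*
 * Worked arithmetic (assuming CAAM_CMD_SZ = 4 and the 64-word descriptor
 * buffer, i.e. CAAM_DESC_BYTES_MAX = 256): every "n * CAAM_CMD_SZ" above
 * counts 32-bit descriptor words, and DESC_MAX_USED_BYTES = 256 minus the
 * job descriptor I/O overhead, so the sh_desc_* arrays below are sized in
 * words to hold the largest shared descriptor that still leaves room in
 * the buffer for a job descriptor.
 */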

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

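	/*
	 * For AES, emit both decrypt variants and select at run time: if
	 * the descriptor is already shared (SHRD set), jump to the variant
	 * with the DK (Decrypt Key) bit; otherwise run the plain decrypt
	 * operation and jump over the DK variant.
	 */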
	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
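	/*
	 * Worked layout example (illustrative values): for
	 * authenc(hmac(sha256),rfc3686(ctr(aes))) with a 16-byte AES key,
	 * ctx->key holds the SHA-256 split key in bytes
	 * [0, split_key_pad_len), the AES key in the next 16 bytes and the
	 * 4-byte nonce right after it; enckeylen covers key + nonce until
	 * it is trimmed below.
	 */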
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
			       enckeylen);
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
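	/*
	 * The two MOVE commands below are retargeted later with
	 * set_move_tgt_here(): the run-time length held in the MATH
	 * register is written into the descriptor word of the final
	 * INFIFO->OUTFIFO move, emulating what MOVE_LEN would do directly
	 * on newer SEC revisions.
	 */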
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
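	/*
	 * Byte view of that layout (assuming CTR_RFC3686_NONCE_SIZE = 4 and
	 * CTR_RFC3686_IV_SIZE = 8): nonce at CONTEXT1 bytes 16-19, IV at
	 * bytes 20-27, and the initial block counter loaded below at
	 * ctx1_iv_off + CTR_RFC3686_IV_SIZE = byte 28.
	 */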

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	if (alg->caam.geniv)
		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
	else
		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	if (alg->caam.geniv) {
		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
				LDST_SRCDST_BYTE_CONTEXT |
				(ctx1_iv_off << LDST_OFFSET_SHIFT));
		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload the IV */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if the key is already loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if the key is already loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
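	/*
	 * Note (inferred from the surrounding math, not an authoritative
	 * statement of the API): REG3 holds assoclen as passed in by the
	 * caller, which for RFC4106 here includes the 8-byte IV; the input
	 * side therefore reads assoclen - 8 bytes of AAD and skips the IV
	 * separately below, while the output side skips the full assoclen.
	 */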

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
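	/*
	 * Worked example (values from mdpadlen above): for hmac(sha256) the
	 * submask selects mdpadlen[3] = 32, so split_key_len = 64 and
	 * split_key_pad_len = ALIGN(64, 16) = 64; for hmac(sha1),
	 * split_key_len = 40 and split_key_pad_len = 48.
	 */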

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret)
		goto badkey;

	/* append the encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;
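	/*
	 * Worked example: a 20-byte rfc4106 key is a 16-byte AES-128 key
	 * followed by the 4-byte salt, so enckeylen becomes 16 here.
	 */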

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u8 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}
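	/*
	 * Worked example: rfc3686(ctr(aes)) with a 20-byte input key is a
	 * 16-byte AES-128 key followed by the 4-byte nonce, so keylen is
	 * trimmed to 16 here and the nonce is picked up from key + keylen
	 * below.
	 */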

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load Nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX |
		    (crt->ivsize << MOVE_LEN_SHIFT) |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to memory */
	append_seq_store(desc, crt->ivsize,
			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			 (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	if (ctx1_iv_off)
		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
			    (1 << JUMP_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *key_jump_cmd, *desc;
	__be64 sector_size = cpu_to_be64(512);

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}
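	/*
	 * XTS keys are two concatenated AES keys of equal size, so (with
	 * AES_MIN_KEY_SIZE = 16 and AES_MAX_KEY_SIZE = 32) only 32-byte
	 * (AES-128) and 64-byte (AES-256) inputs are accepted here.
	 */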
1734
1735 memcpy(ctx->key, key, keylen);
1736 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
1737 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1738 dev_err(jrdev, "unable to map key i/o memory\n");
1739 return -ENOMEM;
1740 }
1741 ctx->enckeylen = keylen;
1742
1743 /* xts_ablkcipher_encrypt shared descriptor */
1744 desc = ctx->sh_desc_enc;
1745 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1746 /* Skip if already shared */
1747 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1748 JUMP_COND_SHRD);
1749
1750 /* Load class1 keys only */
1751 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1752 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1753
1754 	/* Load sector size into CONTEXT1 at offset 40 bytes (0x28) */
1755 append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1756 LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1757 append_data(desc, (void *)&sector_size, 8);
1758
1759 set_jump_tgt_here(desc, key_jump_cmd);
1760
1761 	/*
1762 	 * Create sequence for loading the sector index:
1763 	 * upper 8B of IV - used as the sector index
1764 	 * lower 8B of IV - discarded
1765 	 */
1766 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1767 LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1768 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
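
	/*
	 * Annotation (sketch, not driver code): the two commands above
	 * consume the 16-byte XTS IV supplied in req->info as
	 *
	 *	iv[0..7]  -> CONTEXT1 offset 0x20: the sector index / tweak seed
	 *	iv[8..15] -> FIFOLD_CLASS_SKIP: discarded
	 *
	 * which, together with the 64-bit sector size preloaded at offset
	 * 0x28, lets the CCB derive the XTS tweak per 512-byte sector.
	 */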
1769
1770 /* Load operation */
1771 append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
1772 OP_ALG_ENCRYPT);
1773
1774 /* Perform operation */
1775 ablkcipher_append_src_dst(desc);
1776
1777 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1778 DMA_TO_DEVICE);
1779 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1780 dev_err(jrdev, "unable to map shared descriptor\n");
1781 return -ENOMEM;
1782 }
1783 #ifdef DEBUG
1784 print_hex_dump(KERN_ERR,
1785 "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
1786 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1787 #endif
1788
1789 /* xts_ablkcipher_decrypt shared descriptor */
1790 desc = ctx->sh_desc_dec;
1791
1792 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1793 /* Skip if already shared */
1794 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1795 JUMP_COND_SHRD);
1796
1797 /* Load class1 key only */
1798 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1799 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1800
1801 	/* Load sector size into CONTEXT1 at offset 40 bytes (0x28) */
1802 append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1803 LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1804 append_data(desc, (void *)&sector_size, 8);
1805
1806 set_jump_tgt_here(desc, key_jump_cmd);
1807
1808 	/*
1809 	 * Create sequence for loading the sector index:
1810 	 * upper 8B of IV - used as the sector index
1811 	 * lower 8B of IV - discarded
1812 	 */
1813 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1814 LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1815 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1816
1817 /* Load operation */
1818 append_dec_op1(desc, ctx->class1_alg_type);
1819
1820 /* Perform operation */
1821 ablkcipher_append_src_dst(desc);
1822
1823 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1824 DMA_TO_DEVICE);
1825 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1826 dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
1827 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
1828 dev_err(jrdev, "unable to map shared descriptor\n");
1829 return -ENOMEM;
1830 }
1831 #ifdef DEBUG
1832 print_hex_dump(KERN_ERR,
1833 "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
1834 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1835 #endif
1836
1837 return 0;
1838 }
1839
1840 /*
1841 * aead_edesc - s/w-extended aead descriptor
1842 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
1843 * @src_nents: number of segments in input scatterlist
1844 * @dst_nents: number of segments in output scatterlist
1845 * @iv_dma: dma address of iv for checking continuity and link table
1846  * @sec4_sg_bytes: length of dma mapped sec4_sg space
1847  * @sec4_sg_dma: bus physical mapped address of h/w link table
1848  * @sec4_sg: pointer to h/w link table
1849  * @hw_desc: the h/w job descriptor followed by any referenced link tables
      *          (variable length; must not exceed MAX_CAAM_DESCSIZE)
1850 */
1851 struct aead_edesc {
1852 int assoc_nents;
1853 int src_nents;
1854 int dst_nents;
1855 dma_addr_t iv_dma;
1856 int sec4_sg_bytes;
1857 dma_addr_t sec4_sg_dma;
1858 struct sec4_sg_entry *sec4_sg;
1859 u32 hw_desc[];
1860 };
1861
1862 /*
1863 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1864 * @src_nents: number of segments in input scatterlist
1865 * @dst_nents: number of segments in output scatterlist
1866 * @iv_dma: dma address of iv for checking continuity and link table
1867  * @sec4_sg_bytes: length of dma mapped sec4_sg space
1868  * @sec4_sg_dma: bus physical mapped address of h/w link table
1869  * @sec4_sg: pointer to h/w link table
1870  * @hw_desc: the h/w job descriptor followed by any referenced link tables
      *          (variable length; must not exceed MAX_CAAM_DESCSIZE)
1871 */
1872 struct ablkcipher_edesc {
1873 int src_nents;
1874 int dst_nents;
1875 dma_addr_t iv_dma;
1876 int sec4_sg_bytes;
1877 dma_addr_t sec4_sg_dma;
1878 struct sec4_sg_entry *sec4_sg;
1879 	u32 hw_desc[];
1880 };
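
/*
 * Annotation (layout sketch, not driver code): both *_edesc flavors are
 * carved out of a single kzalloc() in the *_edesc_alloc() helpers below,
 * so the s/w bookkeeping, the h/w job descriptor and the link table live
 * in one allocation:
 *
 *	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
 *			GFP_DMA | flags);
 *
 *	+---------------------+ <- edesc
 *	| struct *_edesc      |
 *	+---------------------+ <- edesc->hw_desc
 *	| h/w job descriptor  | desc_bytes
 *	+---------------------+ <- edesc->sec4_sg
 *	| sec4 link table     | sec4_sg_bytes (DMA-mapped separately)
 *	+---------------------+
 */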
1881
1882 static void caam_unmap(struct device *dev, struct scatterlist *src,
1883 struct scatterlist *dst, int src_nents,
1884 int dst_nents,
1885 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1886 int sec4_sg_bytes)
1887 {
1888 if (dst != src) {
1889 dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
1890 dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
1891 } else {
1892 dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
1893 }
1894
1895 if (iv_dma)
1896 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1897 if (sec4_sg_bytes)
1898 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
1899 DMA_TO_DEVICE);
1900 }
1901
1902 static void aead_unmap(struct device *dev,
1903 struct aead_edesc *edesc,
1904 struct aead_request *req)
1905 {
1906 caam_unmap(dev, req->src, req->dst,
1907 edesc->src_nents, edesc->dst_nents, 0, 0,
1908 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1909 }
1910
1911 static void ablkcipher_unmap(struct device *dev,
1912 struct ablkcipher_edesc *edesc,
1913 struct ablkcipher_request *req)
1914 {
1915 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1916 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1917
1918 caam_unmap(dev, req->src, req->dst,
1919 edesc->src_nents, edesc->dst_nents,
1920 edesc->iv_dma, ivsize,
1921 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1922 }
1923
1924 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1925 void *context)
1926 {
1927 struct aead_request *req = context;
1928 struct aead_edesc *edesc;
1929
1930 #ifdef DEBUG
1931 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1932 #endif
1933
1934 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1935
1936 if (err)
1937 caam_jr_strstatus(jrdev, err);
1938
1939 aead_unmap(jrdev, edesc, req);
1940
1941 kfree(edesc);
1942
1943 aead_request_complete(req, err);
1944 }
1945
1946 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1947 void *context)
1948 {
1949 struct aead_request *req = context;
1950 struct aead_edesc *edesc;
1951
1952 #ifdef DEBUG
1953 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1954 #endif
1955
1956 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1957
1958 if (err)
1959 caam_jr_strstatus(jrdev, err);
1960
1961 aead_unmap(jrdev, edesc, req);
1962
1963 	/*
1964 	 * If the h/w authentication (ICV) check failed, report -EBADMSG.
1965 	 */
1966 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1967 err = -EBADMSG;
1968
1969 kfree(edesc);
1970
1971 aead_request_complete(req, err);
1972 }
1973
1974 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1975 void *context)
1976 {
1977 struct ablkcipher_request *req = context;
1978 struct ablkcipher_edesc *edesc;
1979 #ifdef DEBUG
1980 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1981 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1982
1983 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1984 #endif
1985
1986 edesc = (struct ablkcipher_edesc *)((char *)desc -
1987 offsetof(struct ablkcipher_edesc, hw_desc));
1988
1989 if (err)
1990 caam_jr_strstatus(jrdev, err);
1991
1992 #ifdef DEBUG
1993 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
1994 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1995 edesc->src_nents > 1 ? 100 : ivsize, 1);
1996 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
1997 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1998 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1999 #endif
2000
2001 ablkcipher_unmap(jrdev, edesc, req);
2002 kfree(edesc);
2003
2004 ablkcipher_request_complete(req, err);
2005 }
2006
2007 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2008 void *context)
2009 {
2010 struct ablkcipher_request *req = context;
2011 struct ablkcipher_edesc *edesc;
2012 #ifdef DEBUG
2013 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2014 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2015
2016 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2017 #endif
2018
2019 edesc = (struct ablkcipher_edesc *)((char *)desc -
2020 offsetof(struct ablkcipher_edesc, hw_desc));
2021 if (err)
2022 caam_jr_strstatus(jrdev, err);
2023
2024 #ifdef DEBUG
2025 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
2026 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2027 ivsize, 1);
2028 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
2029 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2030 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2031 #endif
2032
2033 ablkcipher_unmap(jrdev, edesc, req);
2034 kfree(edesc);
2035
2036 ablkcipher_request_complete(req, err);
2037 }
2038
2039 /*
2040 * Fill in aead job descriptor
2041 */
2042 static void init_aead_job(struct aead_request *req,
2043 struct aead_edesc *edesc,
2044 bool all_contig, bool encrypt)
2045 {
2046 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2047 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2048 int authsize = ctx->authsize;
2049 u32 *desc = edesc->hw_desc;
2050 u32 out_options, in_options;
2051 dma_addr_t dst_dma, src_dma;
2052 int len, sec4_sg_index = 0;
2053 dma_addr_t ptr;
2054 u32 *sh_desc;
2055
2056 sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
2057 ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
2058
2059 len = desc_len(sh_desc);
2060 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2061
2062 if (all_contig) {
2063 src_dma = sg_dma_address(req->src);
2064 in_options = 0;
2065 } else {
2066 src_dma = edesc->sec4_sg_dma;
2067 sec4_sg_index += edesc->src_nents;
2068 in_options = LDST_SGF;
2069 }
2070
2071 append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
2072 in_options);
2073
2074 dst_dma = src_dma;
2075 out_options = in_options;
2076
2077 if (unlikely(req->src != req->dst)) {
2078 if (!edesc->dst_nents) {
2079 dst_dma = sg_dma_address(req->dst);
2080 } else {
2081 dst_dma = edesc->sec4_sg_dma +
2082 sec4_sg_index *
2083 sizeof(struct sec4_sg_entry);
2084 out_options = LDST_SGF;
2085 }
2086 }
2087
2088 if (encrypt)
2089 append_seq_out_ptr(desc, dst_dma,
2090 req->assoclen + req->cryptlen + authsize,
2091 out_options);
2092 else
2093 append_seq_out_ptr(desc, dst_dma,
2094 req->assoclen + req->cryptlen - authsize,
2095 out_options);
2096
2097 /* REG3 = assoclen */
2098 append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
2099 }
2100
2101 static void init_gcm_job(struct aead_request *req,
2102 struct aead_edesc *edesc,
2103 bool all_contig, bool encrypt)
2104 {
2105 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2106 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2107 unsigned int ivsize = crypto_aead_ivsize(aead);
2108 u32 *desc = edesc->hw_desc;
2109 bool generic_gcm = (ivsize == 12);
2110 unsigned int last;
2111
2112 init_aead_job(req, edesc, all_contig, encrypt);
2113
2114 	/* BUG: handling zero-length input should not be specific to generic GCM. */
2115 last = 0;
2116 if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
2117 last = FIFOLD_TYPE_LAST1;
2118
2119 /* Read GCM IV */
2120 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
2121 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
2122 /* Append Salt */
2123 if (!generic_gcm)
2124 append_data(desc, ctx->key + ctx->enckeylen, 4);
2125 /* Append IV */
2126 append_data(desc, req->iv, ivsize);
2127 /* End of blank commands */
2128 }
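
/*
 * Annotation (sketch, not driver code): the FIFO LOAD above always feeds
 * 12 bytes of IV material to class 1, composed differently per mode:
 *
 *	gcm(aes), ivsize == 12:       IV[0..11]              (req->iv)
 *	rfc4106(gcm(aes)), ivsize 8:  salt[0..3] || IV[0..7] (salt kept
 *	                              right after the AES key in ctx->key)
 */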
2129
2130 static void init_authenc_job(struct aead_request *req,
2131 struct aead_edesc *edesc,
2132 bool all_contig, bool encrypt)
2133 {
2134 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2135 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
2136 struct caam_aead_alg, aead);
2137 unsigned int ivsize = crypto_aead_ivsize(aead);
2138 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2139 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
2140 OP_ALG_AAI_CTR_MOD128);
2141 const bool is_rfc3686 = alg->caam.rfc3686;
2142 u32 *desc = edesc->hw_desc;
2143 u32 ivoffset = 0;
2144
2145 	/*
2146 	 * AES-CTR needs the IV loaded into the CONTEXT1 reg
2147 	 * at an offset of 128 bits (16 bytes):
2148 	 * CONTEXT1[255:128] = IV
2149 	 */
2150 if (ctr_mode)
2151 ivoffset = 16;
2152
2153 /*
2154 * RFC3686 specific:
2155 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
2156 */
2157 if (is_rfc3686)
2158 ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
2159
2160 init_aead_job(req, edesc, all_contig, encrypt);
2161
2162 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
2163 append_load_as_imm(desc, req->iv, ivsize,
2164 LDST_CLASS_1_CCB |
2165 LDST_SRCDST_BYTE_CONTEXT |
2166 (ivoffset << LDST_OFFSET_SHIFT));
2167 }
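
/*
 * Annotation (sketch, not driver code): the resulting CONTEXT1 usage,
 * matching the ivoffset computation above:
 *
 *	CBC and friends: IV loaded at offset 0
 *	AES-CTR:         CONTEXT1[255:128] = IV, ivoffset = 16
 *	RFC3686:         CONTEXT1[255:128] = {NONCE, IV, COUNTER},
 *	                 ivoffset = 16 + CTR_RFC3686_NONCE_SIZE
 */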
2168
2169 /*
2170 * Fill in ablkcipher job descriptor
2171 */
2172 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2173 struct ablkcipher_edesc *edesc,
2174 struct ablkcipher_request *req,
2175 bool iv_contig)
2176 {
2177 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2178 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2179 u32 *desc = edesc->hw_desc;
2180 u32 out_options = 0, in_options;
2181 dma_addr_t dst_dma, src_dma;
2182 int len, sec4_sg_index = 0;
2183
2184 #ifdef DEBUG
2185 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2186 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2187 ivsize, 1);
2188 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
2189 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2190 edesc->src_nents ? 100 : req->nbytes, 1);
2191 #endif
2192
2193 len = desc_len(sh_desc);
2194 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2195
2196 if (iv_contig) {
2197 src_dma = edesc->iv_dma;
2198 in_options = 0;
2199 } else {
2200 src_dma = edesc->sec4_sg_dma;
2201 sec4_sg_index += edesc->src_nents + 1;
2202 in_options = LDST_SGF;
2203 }
2204 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2205
2206 if (likely(req->src == req->dst)) {
2207 if (!edesc->src_nents && iv_contig) {
2208 dst_dma = sg_dma_address(req->src);
2209 } else {
2210 dst_dma = edesc->sec4_sg_dma +
2211 sizeof(struct sec4_sg_entry);
2212 out_options = LDST_SGF;
2213 }
2214 } else {
2215 if (!edesc->dst_nents) {
2216 dst_dma = sg_dma_address(req->dst);
2217 } else {
2218 dst_dma = edesc->sec4_sg_dma +
2219 sec4_sg_index * sizeof(struct sec4_sg_entry);
2220 out_options = LDST_SGF;
2221 }
2222 }
2223 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2224 }
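
/*
 * Annotation (decision table, not driver code): how the SEQ OUT pointer
 * is chosen in init_ablkcipher_job() above:
 *
 *	in-place (req->src == req->dst):
 *	  single segment, IV contiguous -> dst_dma = sg_dma_address(req->src)
 *	  otherwise                     -> first link-table entry after the IV
 *	out-of-place (req->src != req->dst):
 *	  single dst segment            -> dst_dma = sg_dma_address(req->dst)
 *	  otherwise                     -> dst entries at sec4_sg_index
 */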
2225
2226 /*
2227 * Fill in ablkcipher givencrypt job descriptor
2228 */
2229 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2230 struct ablkcipher_edesc *edesc,
2231 struct ablkcipher_request *req,
2232 bool iv_contig)
2233 {
2234 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2235 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2236 u32 *desc = edesc->hw_desc;
2237 u32 out_options, in_options;
2238 dma_addr_t dst_dma, src_dma;
2239 int len, sec4_sg_index = 0;
2240
2241 #ifdef DEBUG
2242 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2243 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2244 ivsize, 1);
2245 print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
2246 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2247 edesc->src_nents ? 100 : req->nbytes, 1);
2248 #endif
2249
2250 len = desc_len(sh_desc);
2251 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2252
2253 if (!edesc->src_nents) {
2254 src_dma = sg_dma_address(req->src);
2255 in_options = 0;
2256 } else {
2257 src_dma = edesc->sec4_sg_dma;
2258 sec4_sg_index += edesc->src_nents;
2259 in_options = LDST_SGF;
2260 }
2261 append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2262
2263 if (iv_contig) {
2264 dst_dma = edesc->iv_dma;
2265 out_options = 0;
2266 } else {
2267 dst_dma = edesc->sec4_sg_dma +
2268 sec4_sg_index * sizeof(struct sec4_sg_entry);
2269 out_options = LDST_SGF;
2270 }
2271 append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2272 }
2273
2274 /*
2275 * allocate and map the aead extended descriptor
2276 */
2277 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2278 int desc_bytes, bool *all_contig_ptr,
2279 bool encrypt)
2280 {
2281 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2282 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2283 struct device *jrdev = ctx->jrdev;
2284 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2285 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2286 int src_nents, dst_nents = 0;
2287 struct aead_edesc *edesc;
2288 int sgc;
2289 bool all_contig = true;
2290 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2291 unsigned int authsize = ctx->authsize;
2292
2293 if (unlikely(req->dst != req->src)) {
2294 src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
2295 dst_nents = sg_count(req->dst,
2296 req->assoclen + req->cryptlen +
2297 (encrypt ? authsize : (-authsize)));
2298 } else {
2299 src_nents = sg_count(req->src,
2300 req->assoclen + req->cryptlen +
2301 (encrypt ? authsize : 0));
2302 }
2303
2304 /* Check if data are contiguous. */
2305 all_contig = !src_nents;
2306 if (!all_contig) {
2307 src_nents = src_nents ? : 1;
2308 sec4_sg_len = src_nents;
2309 }
2310
2311 sec4_sg_len += dst_nents;
2312
2313 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2314
2315 /* allocate space for base edesc and hw desc commands, link tables */
2316 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2317 GFP_DMA | flags);
2318 if (!edesc) {
2319 dev_err(jrdev, "could not allocate extended descriptor\n");
2320 return ERR_PTR(-ENOMEM);
2321 }
2322
2323 if (likely(req->src == req->dst)) {
2324 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2325 DMA_BIDIRECTIONAL);
2326 if (unlikely(!sgc)) {
2327 dev_err(jrdev, "unable to map source\n");
2328 kfree(edesc);
2329 return ERR_PTR(-ENOMEM);
2330 }
2331 } else {
2332 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2333 DMA_TO_DEVICE);
2334 if (unlikely(!sgc)) {
2335 dev_err(jrdev, "unable to map source\n");
2336 kfree(edesc);
2337 return ERR_PTR(-ENOMEM);
2338 }
2339
2340 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2341 DMA_FROM_DEVICE);
2342 if (unlikely(!sgc)) {
2343 dev_err(jrdev, "unable to map destination\n");
2344 dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
2345 DMA_TO_DEVICE);
2346 kfree(edesc);
2347 return ERR_PTR(-ENOMEM);
2348 }
2349 }
2350
2351 edesc->src_nents = src_nents;
2352 edesc->dst_nents = dst_nents;
2353 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2354 desc_bytes;
2355 *all_contig_ptr = all_contig;
2356
2357 sec4_sg_index = 0;
2358 if (!all_contig) {
2359 sg_to_sec4_sg_last(req->src, src_nents,
2360 edesc->sec4_sg + sec4_sg_index, 0);
2361 sec4_sg_index += src_nents;
2362 }
2363 if (dst_nents) {
2364 sg_to_sec4_sg_last(req->dst, dst_nents,
2365 edesc->sec4_sg + sec4_sg_index, 0);
2366 }
2367
2368 if (!sec4_sg_bytes)
2369 return edesc;
2370
2371 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2372 sec4_sg_bytes, DMA_TO_DEVICE);
2373 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2374 dev_err(jrdev, "unable to map S/G table\n");
2375 aead_unmap(jrdev, edesc, req);
2376 kfree(edesc);
2377 return ERR_PTR(-ENOMEM);
2378 }
2379
2380 edesc->sec4_sg_bytes = sec4_sg_bytes;
2381
2382 return edesc;
2383 }
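
/*
 * Annotation (sketch, not driver code): in the non-contiguous case the
 * link table built above is laid out as
 *
 *	sec4_sg[0 .. src_nents-1]             source segments (last one marked)
 *	sec4_sg[src_nents .. src_nents+dst_nents-1]
 *	                                      destination segments, present
 *	                                      only when req->dst != req->src
 *
 * and init_aead_job() references it through edesc->sec4_sg_dma with
 * LDST_SGF set on the SEQ IN/OUT pointer commands.
 */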
2384
2385 static int gcm_encrypt(struct aead_request *req)
2386 {
2387 struct aead_edesc *edesc;
2388 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2389 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2390 struct device *jrdev = ctx->jrdev;
2391 bool all_contig;
2392 u32 *desc;
2393 int ret = 0;
2394
2395 /* allocate extended descriptor */
2396 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
2397 if (IS_ERR(edesc))
2398 return PTR_ERR(edesc);
2399
2400 /* Create and submit job descriptor */
2401 init_gcm_job(req, edesc, all_contig, true);
2402 #ifdef DEBUG
2403 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2404 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2405 desc_bytes(edesc->hw_desc), 1);
2406 #endif
2407
2408 desc = edesc->hw_desc;
2409 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2410 if (!ret) {
2411 ret = -EINPROGRESS;
2412 } else {
2413 aead_unmap(jrdev, edesc, req);
2414 kfree(edesc);
2415 }
2416
2417 return ret;
2418 }
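
/*
 * Annotation (usage sketch for a generic crypto API caller, not driver
 * code): the enqueue pattern above implements the usual asynchronous
 * contract:
 *
 *	ret = crypto_aead_encrypt(req);
 *	if (ret == -EINPROGRESS)
 *		the request was queued; req->base.complete() runs later
 *		from aead_encrypt_done() with the final status
 *	else if (ret)
 *		the job was never enqueued and the extended descriptor
 *		has already been unmapped and freed
 */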
2419
2420 static int ipsec_gcm_encrypt(struct aead_request *req)
2421 {
	/* RFC4106 assoc data is at least SPI (4 bytes) + seq. number (4 bytes) */
2422 	if (req->assoclen < 8)
2423 		return -EINVAL;
2424
2425 return gcm_encrypt(req);
2426 }
2427
2428 static int aead_encrypt(struct aead_request *req)
2429 {
2430 struct aead_edesc *edesc;
2431 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2432 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2433 struct device *jrdev = ctx->jrdev;
2434 bool all_contig;
2435 u32 *desc;
2436 int ret = 0;
2437
2438 /* allocate extended descriptor */
2439 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2440 &all_contig, true);
2441 if (IS_ERR(edesc))
2442 return PTR_ERR(edesc);
2443
2444 /* Create and submit job descriptor */
2445 init_authenc_job(req, edesc, all_contig, true);
2446 #ifdef DEBUG
2447 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2448 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2449 desc_bytes(edesc->hw_desc), 1);
2450 #endif
2451
2452 desc = edesc->hw_desc;
2453 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2454 if (!ret) {
2455 ret = -EINPROGRESS;
2456 } else {
2457 aead_unmap(jrdev, edesc, req);
2458 kfree(edesc);
2459 }
2460
2461 return ret;
2462 }
2463
2464 static int gcm_decrypt(struct aead_request *req)
2465 {
2466 struct aead_edesc *edesc;
2467 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2468 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2469 struct device *jrdev = ctx->jrdev;
2470 bool all_contig;
2471 u32 *desc;
2472 int ret = 0;
2473
2474 /* allocate extended descriptor */
2475 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
2476 if (IS_ERR(edesc))
2477 return PTR_ERR(edesc);
2478
2479 	/* Create and submit job descriptor */
2480 init_gcm_job(req, edesc, all_contig, false);
2481 #ifdef DEBUG
2482 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2483 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2484 desc_bytes(edesc->hw_desc), 1);
2485 #endif
2486
2487 desc = edesc->hw_desc;
2488 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2489 if (!ret) {
2490 ret = -EINPROGRESS;
2491 } else {
2492 aead_unmap(jrdev, edesc, req);
2493 kfree(edesc);
2494 }
2495
2496 return ret;
2497 }
2498
2499 static int ipsec_gcm_decrypt(struct aead_request *req)
2500 {
2501 if (req->assoclen < 8)
2502 return -EINVAL;
2503
2504 return gcm_decrypt(req);
2505 }
2506
2507 static int aead_decrypt(struct aead_request *req)
2508 {
2509 struct aead_edesc *edesc;
2510 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2511 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2512 struct device *jrdev = ctx->jrdev;
2513 bool all_contig;
2514 u32 *desc;
2515 int ret = 0;
2516
2517 /* allocate extended descriptor */
2518 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2519 &all_contig, false);
2520 if (IS_ERR(edesc))
2521 return PTR_ERR(edesc);
2522
2523 #ifdef DEBUG
2524 print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
2525 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2526 req->assoclen + req->cryptlen, 1);
2527 #endif
2528
2529 	/* Create and submit job descriptor */
2530 init_authenc_job(req, edesc, all_contig, false);
2531 #ifdef DEBUG
2532 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2533 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2534 desc_bytes(edesc->hw_desc), 1);
2535 #endif
2536
2537 desc = edesc->hw_desc;
2538 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2539 if (!ret) {
2540 ret = -EINPROGRESS;
2541 } else {
2542 aead_unmap(jrdev, edesc, req);
2543 kfree(edesc);
2544 }
2545
2546 return ret;
2547 }
2548
2549 /*
2550 * allocate and map the ablkcipher extended descriptor for ablkcipher
2551 */
2552 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2553 *req, int desc_bytes,
2554 bool *iv_contig_out)
2555 {
2556 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2557 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2558 struct device *jrdev = ctx->jrdev;
2559 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2560 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2561 GFP_KERNEL : GFP_ATOMIC;
2562 int src_nents, dst_nents = 0, sec4_sg_bytes;
2563 struct ablkcipher_edesc *edesc;
2564 dma_addr_t iv_dma = 0;
2565 bool iv_contig = false;
2566 int sgc;
2567 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2568 int sec4_sg_index;
2569
2570 src_nents = sg_count(req->src, req->nbytes);
2571
2572 if (req->dst != req->src)
2573 dst_nents = sg_count(req->dst, req->nbytes);
2574
2575 if (likely(req->src == req->dst)) {
2576 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2577 DMA_BIDIRECTIONAL);
2578 } else {
2579 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2580 DMA_TO_DEVICE);
2581 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2582 DMA_FROM_DEVICE);
2583 }
2584
2585 	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
2586 	if (dma_mapping_error(jrdev, iv_dma)) {
2587 		dev_err(jrdev, "unable to map IV\n");
		/* don't leak the just-mapped scatterlists on error */
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   0, 0, 0, 0);
2588 		return ERR_PTR(-ENOMEM);
2589 	}
2590
2591 	/*
2592 	 * Check if the IV can be contiguous with the source.
2593 	 * If so, include it directly; if not, create a scatterlist entry.
2594 	 */
2595 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2596 iv_contig = true;
2597 else
2598 src_nents = src_nents ? : 1;
2599 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2600 sizeof(struct sec4_sg_entry);
2601
2602 /* allocate space for base edesc and hw desc commands, link tables */
2603 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2604 GFP_DMA | flags);
2605 if (!edesc) {
2606 dev_err(jrdev, "could not allocate extended descriptor\n");
2607 return ERR_PTR(-ENOMEM);
2608 }
2609
2610 edesc->src_nents = src_nents;
2611 edesc->dst_nents = dst_nents;
2612 edesc->sec4_sg_bytes = sec4_sg_bytes;
2613 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2614 desc_bytes;
2615
2616 sec4_sg_index = 0;
2617 if (!iv_contig) {
2618 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2619 sg_to_sec4_sg_last(req->src, src_nents,
2620 edesc->sec4_sg + 1, 0);
2621 sec4_sg_index += 1 + src_nents;
2622 }
2623
2624 if (dst_nents) {
2625 sg_to_sec4_sg_last(req->dst, dst_nents,
2626 edesc->sec4_sg + sec4_sg_index, 0);
2627 }
2628
2629 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2630 sec4_sg_bytes, DMA_TO_DEVICE);
2631 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2632 		dev_err(jrdev, "unable to map S/G table\n");
		/* undo the IV and scatterlist mappings and free edesc */
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
2633 		return ERR_PTR(-ENOMEM);
2634 	}
2635
2636 edesc->iv_dma = iv_dma;
2637
2638 #ifdef DEBUG
2639 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
2640 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2641 sec4_sg_bytes, 1);
2642 #endif
2643
2644 *iv_contig_out = iv_contig;
2645 return edesc;
2646 }
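
/*
 * Annotation (sketch, not driver code): the "IV contiguous" fast path
 * above fires when the lone source segment starts right where the
 * DMA-mapped IV ends, so the h/w can read IV and payload as one run:
 *
 *	iv_contig = !src_nents &&
 *		    iv_dma + ivsize == sg_dma_address(req->src);
 *
 * otherwise the first link-table entry describes the IV
 * (dma_to_sec4_sg_one()) followed by the source segments.
 */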
2647
2648 static int ablkcipher_encrypt(struct ablkcipher_request *req)
2649 {
2650 struct ablkcipher_edesc *edesc;
2651 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2652 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2653 struct device *jrdev = ctx->jrdev;
2654 bool iv_contig;
2655 u32 *desc;
2656 int ret = 0;
2657
2658 /* allocate extended descriptor */
2659 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2660 CAAM_CMD_SZ, &iv_contig);
2661 if (IS_ERR(edesc))
2662 return PTR_ERR(edesc);
2663
2664 	/* Create and submit job descriptor */
2665 init_ablkcipher_job(ctx->sh_desc_enc,
2666 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2667 #ifdef DEBUG
2668 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2669 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2670 desc_bytes(edesc->hw_desc), 1);
2671 #endif
2672 desc = edesc->hw_desc;
2673 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2674
2675 if (!ret) {
2676 ret = -EINPROGRESS;
2677 } else {
2678 ablkcipher_unmap(jrdev, edesc, req);
2679 kfree(edesc);
2680 }
2681
2682 return ret;
2683 }
2684
2685 static int ablkcipher_decrypt(struct ablkcipher_request *req)
2686 {
2687 struct ablkcipher_edesc *edesc;
2688 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2689 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2690 struct device *jrdev = ctx->jrdev;
2691 bool iv_contig;
2692 u32 *desc;
2693 int ret = 0;
2694
2695 /* allocate extended descriptor */
2696 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2697 CAAM_CMD_SZ, &iv_contig);
2698 if (IS_ERR(edesc))
2699 return PTR_ERR(edesc);
2700
2701 	/* Create and submit job descriptor */
2702 init_ablkcipher_job(ctx->sh_desc_dec,
2703 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2704 desc = edesc->hw_desc;
2705 #ifdef DEBUG
2706 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2707 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2708 desc_bytes(edesc->hw_desc), 1);
2709 #endif
2710
2711 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2712 if (!ret) {
2713 ret = -EINPROGRESS;
2714 } else {
2715 ablkcipher_unmap(jrdev, edesc, req);
2716 kfree(edesc);
2717 }
2718
2719 return ret;
2720 }
2721
2722 /*
2723 * allocate and map the ablkcipher extended descriptor
2724 * for ablkcipher givencrypt
2725 */
2726 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2727 struct skcipher_givcrypt_request *greq,
2728 int desc_bytes,
2729 bool *iv_contig_out)
2730 {
2731 struct ablkcipher_request *req = &greq->creq;
2732 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2733 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2734 struct device *jrdev = ctx->jrdev;
2735 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2736 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2737 GFP_KERNEL : GFP_ATOMIC;
2738 int src_nents, dst_nents = 0, sec4_sg_bytes;
2739 struct ablkcipher_edesc *edesc;
2740 dma_addr_t iv_dma = 0;
2741 bool iv_contig = false;
2742 int sgc;
2743 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2744 int sec4_sg_index;
2745
2746 src_nents = sg_count(req->src, req->nbytes);
2747
2748 if (unlikely(req->dst != req->src))
2749 dst_nents = sg_count(req->dst, req->nbytes);
2750
2751 if (likely(req->src == req->dst)) {
2752 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2753 DMA_BIDIRECTIONAL);
2754 } else {
2755 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2756 DMA_TO_DEVICE);
2757 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2758 DMA_FROM_DEVICE);
2759 }
2760
2761 	/*
2762 	 * Check if the generated IV can be contiguous with the destination.
2763 	 * If so, include it directly; if not, create a scatterlist entry.
2764 	 */
2765 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
2766 	if (dma_mapping_error(jrdev, iv_dma)) {
2767 		dev_err(jrdev, "unable to map IV\n");
		/* don't leak the just-mapped scatterlists on error */
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   0, 0, 0, 0);
2768 		return ERR_PTR(-ENOMEM);
2769 	}
2770
2771 if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
2772 iv_contig = true;
2773 else
2774 dst_nents = dst_nents ? : 1;
2775 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2776 sizeof(struct sec4_sg_entry);
2777
2778 /* allocate space for base edesc and hw desc commands, link tables */
2779 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2780 GFP_DMA | flags);
2781 if (!edesc) {
2782 dev_err(jrdev, "could not allocate extended descriptor\n");
2783 return ERR_PTR(-ENOMEM);
2784 }
2785
2786 edesc->src_nents = src_nents;
2787 edesc->dst_nents = dst_nents;
2788 edesc->sec4_sg_bytes = sec4_sg_bytes;
2789 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2790 desc_bytes;
2791
2792 sec4_sg_index = 0;
2793 if (src_nents) {
2794 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
2795 sec4_sg_index += src_nents;
2796 }
2797
2798 if (!iv_contig) {
2799 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2800 iv_dma, ivsize, 0);
2801 sec4_sg_index += 1;
2802 sg_to_sec4_sg_last(req->dst, dst_nents,
2803 edesc->sec4_sg + sec4_sg_index, 0);
2804 }
2805
2806 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2807 sec4_sg_bytes, DMA_TO_DEVICE);
2808 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2809 		dev_err(jrdev, "unable to map S/G table\n");
		/* undo the IV and scatterlist mappings and free edesc */
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
2810 		return ERR_PTR(-ENOMEM);
2811 	}
2812 edesc->iv_dma = iv_dma;
2813
2814 #ifdef DEBUG
2815 print_hex_dump(KERN_ERR,
2816 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
2817 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2818 sec4_sg_bytes, 1);
2819 #endif
2820
2821 *iv_contig_out = iv_contig;
2822 return edesc;
2823 }
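
/*
 * Annotation (sketch, not driver code): the givencrypt variant differs
 * from ablkcipher_edesc_alloc() in which side the IV must touch: the
 * generated IV (greq->giv) is written out, so contiguity is checked
 * against the destination instead:
 *
 *	iv_contig = !dst_nents &&
 *		    iv_dma + ivsize == sg_dma_address(req->dst);
 *
 * and in the link-table case the IV entry precedes the destination
 * segments rather than the source ones.
 */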
2824
2825 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
2826 {
2827 struct ablkcipher_request *req = &creq->creq;
2828 struct ablkcipher_edesc *edesc;
2829 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2830 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2831 struct device *jrdev = ctx->jrdev;
2832 bool iv_contig;
2833 u32 *desc;
2834 int ret = 0;
2835
2836 /* allocate extended descriptor */
2837 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
2838 CAAM_CMD_SZ, &iv_contig);
2839 if (IS_ERR(edesc))
2840 return PTR_ERR(edesc);
2841
2842 	/* Create and submit job descriptor */
2843 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
2844 edesc, req, iv_contig);
2845 #ifdef DEBUG
2846 print_hex_dump(KERN_ERR,
2847 "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
2848 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2849 desc_bytes(edesc->hw_desc), 1);
2850 #endif
2851 desc = edesc->hw_desc;
2852 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2853
2854 if (!ret) {
2855 ret = -EINPROGRESS;
2856 } else {
2857 ablkcipher_unmap(jrdev, edesc, req);
2858 kfree(edesc);
2859 }
2860
2861 return ret;
2862 }
2863
2864 #define template_aead template_u.aead
2865 #define template_ablkcipher template_u.ablkcipher
2866 struct caam_alg_template {
2867 char name[CRYPTO_MAX_ALG_NAME];
2868 char driver_name[CRYPTO_MAX_ALG_NAME];
2869 unsigned int blocksize;
2870 u32 type;
2871 union {
2872 struct ablkcipher_alg ablkcipher;
2873 } template_u;
2874 u32 class1_alg_type;
2875 u32 class2_alg_type;
2876 u32 alg_op;
2877 };
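
/*
 * Annotation (sketch, assuming the registration helpers later in this
 * file; not a definitive description): each template below is turned
 * into a live crypto_alg at module init, roughly:
 *
 *	struct caam_alg_template *t = &driver_algs[i];
 *
 *	alg->cra_name         = t->name;         e.g. "cbc(aes)"
 *	alg->cra_driver_name  = t->driver_name;  e.g. "cbc-aes-caam"
 *	alg->cra_blocksize    = t->blocksize;
 *	alg->cra_priority     = CAAM_CRA_PRIORITY;
 *	alg->cra_u.ablkcipher = t->template_ablkcipher;
 *
 * with class1_alg_type/class2_alg_type/alg_op stashed in the tfm context
 * for setkey and shared-descriptor construction.
 */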
2878
2879 static struct caam_alg_template driver_algs[] = {
2880 /* ablkcipher descriptor */
2881 {
2882 .name = "cbc(aes)",
2883 .driver_name = "cbc-aes-caam",
2884 .blocksize = AES_BLOCK_SIZE,
2885 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2886 .template_ablkcipher = {
2887 .setkey = ablkcipher_setkey,
2888 .encrypt = ablkcipher_encrypt,
2889 .decrypt = ablkcipher_decrypt,
2890 .givencrypt = ablkcipher_givencrypt,
2891 .geniv = "<built-in>",
2892 .min_keysize = AES_MIN_KEY_SIZE,
2893 .max_keysize = AES_MAX_KEY_SIZE,
2894 .ivsize = AES_BLOCK_SIZE,
2895 },
2896 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2897 },
2898 {
2899 .name = "cbc(des3_ede)",
2900 .driver_name = "cbc-3des-caam",
2901 .blocksize = DES3_EDE_BLOCK_SIZE,
2902 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2903 .template_ablkcipher = {
2904 .setkey = ablkcipher_setkey,
2905 .encrypt = ablkcipher_encrypt,
2906 .decrypt = ablkcipher_decrypt,
2907 .givencrypt = ablkcipher_givencrypt,
2908 .geniv = "<built-in>",
2909 .min_keysize = DES3_EDE_KEY_SIZE,
2910 .max_keysize = DES3_EDE_KEY_SIZE,
2911 .ivsize = DES3_EDE_BLOCK_SIZE,
2912 },
2913 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2914 },
2915 {
2916 .name = "cbc(des)",
2917 .driver_name = "cbc-des-caam",
2918 .blocksize = DES_BLOCK_SIZE,
2919 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2920 .template_ablkcipher = {
2921 .setkey = ablkcipher_setkey,
2922 .encrypt = ablkcipher_encrypt,
2923 .decrypt = ablkcipher_decrypt,
2924 .givencrypt = ablkcipher_givencrypt,
2925 .geniv = "<built-in>",
2926 .min_keysize = DES_KEY_SIZE,
2927 .max_keysize = DES_KEY_SIZE,
2928 .ivsize = DES_BLOCK_SIZE,
2929 },
2930 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2931 },
2932 {
2933 .name = "ctr(aes)",
2934 .driver_name = "ctr-aes-caam",
2935 .blocksize = 1,
2936 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2937 .template_ablkcipher = {
2938 .setkey = ablkcipher_setkey,
2939 .encrypt = ablkcipher_encrypt,
2940 .decrypt = ablkcipher_decrypt,
2941 .geniv = "chainiv",
2942 .min_keysize = AES_MIN_KEY_SIZE,
2943 .max_keysize = AES_MAX_KEY_SIZE,
2944 .ivsize = AES_BLOCK_SIZE,
2945 },
2946 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2947 },
2948 {
2949 .name = "rfc3686(ctr(aes))",
2950 .driver_name = "rfc3686-ctr-aes-caam",
2951 .blocksize = 1,
2952 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2953 .template_ablkcipher = {
2954 .setkey = ablkcipher_setkey,
2955 .encrypt = ablkcipher_encrypt,
2956 .decrypt = ablkcipher_decrypt,
2957 .givencrypt = ablkcipher_givencrypt,
2958 .geniv = "<built-in>",
2959 .min_keysize = AES_MIN_KEY_SIZE +
2960 CTR_RFC3686_NONCE_SIZE,
2961 .max_keysize = AES_MAX_KEY_SIZE +
2962 CTR_RFC3686_NONCE_SIZE,
2963 .ivsize = CTR_RFC3686_IV_SIZE,
2964 },
2965 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2966 },
2967 {
2968 .name = "xts(aes)",
2969 .driver_name = "xts-aes-caam",
2970 .blocksize = AES_BLOCK_SIZE,
2971 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2972 .template_ablkcipher = {
2973 .setkey = xts_ablkcipher_setkey,
2974 .encrypt = ablkcipher_encrypt,
2975 .decrypt = ablkcipher_decrypt,
2976 .geniv = "eseqiv",
2977 .min_keysize = 2 * AES_MIN_KEY_SIZE,
2978 .max_keysize = 2 * AES_MAX_KEY_SIZE,
2979 .ivsize = AES_BLOCK_SIZE,
2980 },
2981 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
2982 },
2983 };
2984
2985 static struct caam_aead_alg driver_aeads[] = {
2986 {
2987 .aead = {
2988 .base = {
2989 .cra_name = "rfc4106(gcm(aes))",
2990 .cra_driver_name = "rfc4106-gcm-aes-caam",
2991 .cra_blocksize = 1,
2992 },
2993 .setkey = rfc4106_setkey,
2994 .setauthsize = rfc4106_setauthsize,
2995 .encrypt = ipsec_gcm_encrypt,
2996 .decrypt = ipsec_gcm_decrypt,
2997 .ivsize = 8,
2998 .maxauthsize = AES_BLOCK_SIZE,
2999 },
3000 .caam = {
3001 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3002 },
3003 },
3004 {
3005 .aead = {
3006 .base = {
3007 .cra_name = "rfc4543(gcm(aes))",
3008 .cra_driver_name = "rfc4543-gcm-aes-caam",
3009 .cra_blocksize = 1,
3010 },
3011 .setkey = rfc4543_setkey,
3012 .setauthsize = rfc4543_setauthsize,
3013 .encrypt = ipsec_gcm_encrypt,
3014 .decrypt = ipsec_gcm_decrypt,
3015 .ivsize = 8,
3016 .maxauthsize = AES_BLOCK_SIZE,
3017 },
3018 .caam = {
3019 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3020 },
3021 },
3022 /* Galois Counter Mode */
3023 {
3024 .aead = {
3025 .base = {
3026 .cra_name = "gcm(aes)",
3027 .cra_driver_name = "gcm-aes-caam",
3028 .cra_blocksize = 1,
3029 },
3030 .setkey = gcm_setkey,
3031 .setauthsize = gcm_setauthsize,
3032 .encrypt = gcm_encrypt,
3033 .decrypt = gcm_decrypt,
3034 .ivsize = 12,
3035 .maxauthsize = AES_BLOCK_SIZE,
3036 },
3037 .caam = {
3038 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3039 },
3040 },
3041 /* single-pass ipsec_esp descriptor */
3042 {
3043 .aead = {
3044 .base = {
3045 .cra_name = "authenc(hmac(md5),"
3046 "ecb(cipher_null))",
3047 .cra_driver_name = "authenc-hmac-md5-"
3048 "ecb-cipher_null-caam",
3049 .cra_blocksize = NULL_BLOCK_SIZE,
3050 },
3051 .setkey = aead_setkey,
3052 .setauthsize = aead_setauthsize,
3053 .encrypt = aead_encrypt,
3054 .decrypt = aead_decrypt,
3055 .ivsize = NULL_IV_SIZE,
3056 .maxauthsize = MD5_DIGEST_SIZE,
3057 },
3058 .caam = {
3059 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3060 OP_ALG_AAI_HMAC_PRECOMP,
3061 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3062 },
3063 },
3064 {
3065 .aead = {
3066 .base = {
3067 .cra_name = "authenc(hmac(sha1),"
3068 "ecb(cipher_null))",
3069 .cra_driver_name = "authenc-hmac-sha1-"
3070 "ecb-cipher_null-caam",
3071 .cra_blocksize = NULL_BLOCK_SIZE,
3072 },
3073 .setkey = aead_setkey,
3074 .setauthsize = aead_setauthsize,
3075 .encrypt = aead_encrypt,
3076 .decrypt = aead_decrypt,
3077 .ivsize = NULL_IV_SIZE,
3078 .maxauthsize = SHA1_DIGEST_SIZE,
3079 },
3080 .caam = {
3081 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3082 OP_ALG_AAI_HMAC_PRECOMP,
3083 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3084 },
3085 },
3086 {
3087 .aead = {
3088 .base = {
3089 .cra_name = "authenc(hmac(sha224),"
3090 "ecb(cipher_null))",
3091 .cra_driver_name = "authenc-hmac-sha224-"
3092 "ecb-cipher_null-caam",
3093 .cra_blocksize = NULL_BLOCK_SIZE,
3094 },
3095 .setkey = aead_setkey,
3096 .setauthsize = aead_setauthsize,
3097 .encrypt = aead_encrypt,
3098 .decrypt = aead_decrypt,
3099 .ivsize = NULL_IV_SIZE,
3100 .maxauthsize = SHA224_DIGEST_SIZE,
3101 },
3102 .caam = {
3103 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3104 OP_ALG_AAI_HMAC_PRECOMP,
3105 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3106 },
3107 },
3108 {
3109 .aead = {
3110 .base = {
3111 .cra_name = "authenc(hmac(sha256),"
3112 "ecb(cipher_null))",
3113 .cra_driver_name = "authenc-hmac-sha256-"
3114 "ecb-cipher_null-caam",
3115 .cra_blocksize = NULL_BLOCK_SIZE,
3116 },
3117 .setkey = aead_setkey,
3118 .setauthsize = aead_setauthsize,
3119 .encrypt = aead_encrypt,
3120 .decrypt = aead_decrypt,
3121 .ivsize = NULL_IV_SIZE,
3122 .maxauthsize = SHA256_DIGEST_SIZE,
3123 },
3124 .caam = {
3125 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3126 OP_ALG_AAI_HMAC_PRECOMP,
3127 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3128 },
3129 },
3130 {
3131 .aead = {
3132 .base = {
3133 .cra_name = "authenc(hmac(sha384),"
3134 "ecb(cipher_null))",
3135 .cra_driver_name = "authenc-hmac-sha384-"
3136 "ecb-cipher_null-caam",
3137 .cra_blocksize = NULL_BLOCK_SIZE,
3138 },
3139 .setkey = aead_setkey,
3140 .setauthsize = aead_setauthsize,
3141 .encrypt = aead_encrypt,
3142 .decrypt = aead_decrypt,
3143 .ivsize = NULL_IV_SIZE,
3144 .maxauthsize = SHA384_DIGEST_SIZE,
3145 },
3146 .caam = {
3147 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3148 OP_ALG_AAI_HMAC_PRECOMP,
3149 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3150 },
3151 },
3152 {
3153 .aead = {
3154 .base = {
3155 .cra_name = "authenc(hmac(sha512),"
3156 "ecb(cipher_null))",
3157 .cra_driver_name = "authenc-hmac-sha512-"
3158 "ecb-cipher_null-caam",
3159 .cra_blocksize = NULL_BLOCK_SIZE,
3160 },
3161 .setkey = aead_setkey,
3162 .setauthsize = aead_setauthsize,
3163 .encrypt = aead_encrypt,
3164 .decrypt = aead_decrypt,
3165 .ivsize = NULL_IV_SIZE,
3166 .maxauthsize = SHA512_DIGEST_SIZE,
3167 },
3168 .caam = {
3169 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3170 OP_ALG_AAI_HMAC_PRECOMP,
3171 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3172 },
3173 },
3174 {
3175 .aead = {
3176 .base = {
3177 .cra_name = "authenc(hmac(md5),cbc(aes))",
3178 .cra_driver_name = "authenc-hmac-md5-"
3179 "cbc-aes-caam",
3180 .cra_blocksize = AES_BLOCK_SIZE,
3181 },
3182 .setkey = aead_setkey,
3183 .setauthsize = aead_setauthsize,
3184 .encrypt = aead_encrypt,
3185 .decrypt = aead_decrypt,
3186 .ivsize = AES_BLOCK_SIZE,
3187 .maxauthsize = MD5_DIGEST_SIZE,
3188 },
3189 .caam = {
3190 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3191 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3192 OP_ALG_AAI_HMAC_PRECOMP,
3193 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3194 },
3195 },
3196 {
3197 .aead = {
3198 .base = {
3199 .cra_name = "echainiv(authenc(hmac(md5),"
3200 "cbc(aes)))",
3201 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3202 "cbc-aes-caam",
3203 .cra_blocksize = AES_BLOCK_SIZE,
3204 },
3205 .setkey = aead_setkey,
3206 .setauthsize = aead_setauthsize,
3207 .encrypt = aead_encrypt,
3208 .decrypt = aead_decrypt,
3209 .ivsize = AES_BLOCK_SIZE,
3210 .maxauthsize = MD5_DIGEST_SIZE,
3211 },
3212 .caam = {
3213 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3214 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3215 OP_ALG_AAI_HMAC_PRECOMP,
3216 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3217 .geniv = true,
3218 },
3219 },
3220 {
3221 .aead = {
3222 .base = {
3223 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3224 .cra_driver_name = "authenc-hmac-sha1-"
3225 "cbc-aes-caam",
3226 .cra_blocksize = AES_BLOCK_SIZE,
3227 },
3228 .setkey = aead_setkey,
3229 .setauthsize = aead_setauthsize,
3230 .encrypt = aead_encrypt,
3231 .decrypt = aead_decrypt,
3232 .ivsize = AES_BLOCK_SIZE,
3233 .maxauthsize = SHA1_DIGEST_SIZE,
3234 },
3235 .caam = {
3236 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3237 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3238 OP_ALG_AAI_HMAC_PRECOMP,
3239 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3240 },
3241 },
3242 {
3243 .aead = {
3244 .base = {
3245 .cra_name = "echainiv(authenc(hmac(sha1),"
3246 "cbc(aes)))",
3247 .cra_driver_name = "echainiv-authenc-"
3248 "hmac-sha1-cbc-aes-caam",
3249 .cra_blocksize = AES_BLOCK_SIZE,
3250 },
3251 .setkey = aead_setkey,
3252 .setauthsize = aead_setauthsize,
3253 .encrypt = aead_encrypt,
3254 .decrypt = aead_decrypt,
3255 .ivsize = AES_BLOCK_SIZE,
3256 .maxauthsize = SHA1_DIGEST_SIZE,
3257 },
3258 .caam = {
3259 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3260 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3261 OP_ALG_AAI_HMAC_PRECOMP,
3262 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3263 .geniv = true,
3264 },
3265 },
3266 {
3267 .aead = {
3268 .base = {
3269 .cra_name = "authenc(hmac(sha224),cbc(aes))",
3270 .cra_driver_name = "authenc-hmac-sha224-"
3271 "cbc-aes-caam",
3272 .cra_blocksize = AES_BLOCK_SIZE,
3273 },
3274 .setkey = aead_setkey,
3275 .setauthsize = aead_setauthsize,
3276 .encrypt = aead_encrypt,
3277 .decrypt = aead_decrypt,
3278 .ivsize = AES_BLOCK_SIZE,
3279 .maxauthsize = SHA224_DIGEST_SIZE,
3280 },
3281 .caam = {
3282 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3283 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3284 OP_ALG_AAI_HMAC_PRECOMP,
3285 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3286 },
3287 },
3288 {
3289 .aead = {
3290 .base = {
3291 .cra_name = "echainiv(authenc(hmac(sha224),"
3292 "cbc(aes)))",
3293 .cra_driver_name = "echainiv-authenc-"
3294 "hmac-sha224-cbc-aes-caam",
3295 .cra_blocksize = AES_BLOCK_SIZE,
3296 },
3297 .setkey = aead_setkey,
3298 .setauthsize = aead_setauthsize,
3299 .encrypt = aead_encrypt,
3300 .decrypt = aead_decrypt,
3301 .ivsize = AES_BLOCK_SIZE,
3302 .maxauthsize = SHA224_DIGEST_SIZE,
3303 },
3304 .caam = {
3305 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3306 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3307 OP_ALG_AAI_HMAC_PRECOMP,
3308 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3309 .geniv = true,
3310 },
3311 },
3312 {
3313 .aead = {
3314 .base = {
3315 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3316 .cra_driver_name = "authenc-hmac-sha256-"
3317 "cbc-aes-caam",
3318 .cra_blocksize = AES_BLOCK_SIZE,
3319 },
3320 .setkey = aead_setkey,
3321 .setauthsize = aead_setauthsize,
3322 .encrypt = aead_encrypt,
3323 .decrypt = aead_decrypt,
3324 .ivsize = AES_BLOCK_SIZE,
3325 .maxauthsize = SHA256_DIGEST_SIZE,
3326 },
3327 .caam = {
3328 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3329 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3330 OP_ALG_AAI_HMAC_PRECOMP,
3331 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3332 },
3333 },
3334 {
3335 .aead = {
3336 .base = {
3337 .cra_name = "echainiv(authenc(hmac(sha256),"
3338 "cbc(aes)))",
3339 .cra_driver_name = "echainiv-authenc-"
3340 "hmac-sha256-cbc-aes-caam",
3341 .cra_blocksize = AES_BLOCK_SIZE,
3342 },
3343 .setkey = aead_setkey,
3344 .setauthsize = aead_setauthsize,
3345 .encrypt = aead_encrypt,
3346 .decrypt = aead_decrypt,
3347 .ivsize = AES_BLOCK_SIZE,
3348 .maxauthsize = SHA256_DIGEST_SIZE,
3349 },
3350 .caam = {
3351 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3352 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3353 OP_ALG_AAI_HMAC_PRECOMP,
3354 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3355 .geniv = true,
3356 },
3357 },
3358 {
3359 .aead = {
3360 .base = {
3361 .cra_name = "authenc(hmac(sha384),cbc(aes))",
3362 .cra_driver_name = "authenc-hmac-sha384-"
3363 "cbc-aes-caam",
3364 .cra_blocksize = AES_BLOCK_SIZE,
3365 },
3366 .setkey = aead_setkey,
3367 .setauthsize = aead_setauthsize,
3368 .encrypt = aead_encrypt,
3369 .decrypt = aead_decrypt,
3370 .ivsize = AES_BLOCK_SIZE,
3371 .maxauthsize = SHA384_DIGEST_SIZE,
3372 },
3373 .caam = {
3374 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3375 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3376 OP_ALG_AAI_HMAC_PRECOMP,
3377 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3378 },
3379 },
3380 {
3381 .aead = {
3382 .base = {
3383 .cra_name = "echainiv(authenc(hmac(sha384),"
3384 "cbc(aes)))",
3385 .cra_driver_name = "echainiv-authenc-"
3386 "hmac-sha384-cbc-aes-caam",
3387 .cra_blocksize = AES_BLOCK_SIZE,
3388 },
3389 .setkey = aead_setkey,
3390 .setauthsize = aead_setauthsize,
3391 .encrypt = aead_encrypt,
3392 .decrypt = aead_decrypt,
3393 .ivsize = AES_BLOCK_SIZE,
3394 .maxauthsize = SHA384_DIGEST_SIZE,
3395 },
3396 .caam = {
3397 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3398 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3399 OP_ALG_AAI_HMAC_PRECOMP,
3400 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3401 .geniv = true,
3402 },
3403 },
3404 {
3405 .aead = {
3406 .base = {
3407 .cra_name = "authenc(hmac(sha512),cbc(aes))",
3408 .cra_driver_name = "authenc-hmac-sha512-"
3409 "cbc-aes-caam",
3410 .cra_blocksize = AES_BLOCK_SIZE,
3411 },
3412 .setkey = aead_setkey,
3413 .setauthsize = aead_setauthsize,
3414 .encrypt = aead_encrypt,
3415 .decrypt = aead_decrypt,
3416 .ivsize = AES_BLOCK_SIZE,
3417 .maxauthsize = SHA512_DIGEST_SIZE,
3418 },
3419 .caam = {
3420 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3421 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3422 OP_ALG_AAI_HMAC_PRECOMP,
3423 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3424 },
3425 },
3426 {
3427 .aead = {
3428 .base = {
3429 .cra_name = "echainiv(authenc(hmac(sha512),"
3430 "cbc(aes)))",
3431 .cra_driver_name = "echainiv-authenc-"
3432 "hmac-sha512-cbc-aes-caam",
3433 .cra_blocksize = AES_BLOCK_SIZE,
3434 },
3435 .setkey = aead_setkey,
3436 .setauthsize = aead_setauthsize,
3437 .encrypt = aead_encrypt,
3438 .decrypt = aead_decrypt,
3439 .ivsize = AES_BLOCK_SIZE,
3440 .maxauthsize = SHA512_DIGEST_SIZE,
3441 },
3442 .caam = {
3443 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3444 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3445 OP_ALG_AAI_HMAC_PRECOMP,
3446 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3447 .geniv = true,
3448 },
3449 },
3450 {
3451 .aead = {
3452 .base = {
3453 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3454 .cra_driver_name = "authenc-hmac-md5-"
3455 "cbc-des3_ede-caam",
3456 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3457 },
3458 .setkey = aead_setkey,
3459 .setauthsize = aead_setauthsize,
3460 .encrypt = aead_encrypt,
3461 .decrypt = aead_decrypt,
3462 .ivsize = DES3_EDE_BLOCK_SIZE,
3463 .maxauthsize = MD5_DIGEST_SIZE,
3464 },
3465 .caam = {
3466 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3467 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3468 OP_ALG_AAI_HMAC_PRECOMP,
3469 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3470 }
3471 },
3472 {
3473 .aead = {
3474 .base = {
3475 .cra_name = "echainiv(authenc(hmac(md5),"
3476 "cbc(des3_ede)))",
3477 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3478 "cbc-des3_ede-caam",
3479 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3480 },
3481 .setkey = aead_setkey,
3482 .setauthsize = aead_setauthsize,
3483 .encrypt = aead_encrypt,
3484 .decrypt = aead_decrypt,
3485 .ivsize = DES3_EDE_BLOCK_SIZE,
3486 .maxauthsize = MD5_DIGEST_SIZE,
3487 },
3488 .caam = {
3489 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3490 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3491 OP_ALG_AAI_HMAC_PRECOMP,
3492 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3493 .geniv = true,
3494 }
3495 },
3496 {
3497 .aead = {
3498 .base = {
3499 .cra_name = "authenc(hmac(sha1),"
3500 "cbc(des3_ede))",
3501 .cra_driver_name = "authenc-hmac-sha1-"
3502 "cbc-des3_ede-caam",
3503 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3504 },
3505 .setkey = aead_setkey,
3506 .setauthsize = aead_setauthsize,
3507 .encrypt = aead_encrypt,
3508 .decrypt = aead_decrypt,
3509 .ivsize = DES3_EDE_BLOCK_SIZE,
3510 .maxauthsize = SHA1_DIGEST_SIZE,
3511 },
3512 .caam = {
3513 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3514 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3515 OP_ALG_AAI_HMAC_PRECOMP,
3516 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3517 },
3518 },
3519 {
3520 .aead = {
3521 .base = {
3522 .cra_name = "echainiv(authenc(hmac(sha1),"
3523 "cbc(des3_ede)))",
3524 .cra_driver_name = "echainiv-authenc-"
3525 "hmac-sha1-"
3526 "cbc-des3_ede-caam",
3527 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3528 },
3529 .setkey = aead_setkey,
3530 .setauthsize = aead_setauthsize,
3531 .encrypt = aead_encrypt,
3532 .decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(md5),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha1),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};

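/*
 * Wrapper tying a generic crypto_alg to its CAAM-specific parameters
 * (the descriptor header templates in caam_alg_entry) and to the list
 * of dynamically allocated, registered algorithms.
 */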
struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};

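/*
 * Per-tfm setup shared by all algorithm types: grab a job ring for this
 * transform and cache the OP_ALG descriptor header template values in
 * the context so the setkey/descriptor-construction paths can use them.
 */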
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;

	return 0;
}

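/*
 * ->cra_init hook for the ablkcipher/givcipher algorithms: recover the
 * enclosing caam_crypto_alg from the generic crypto_alg and run the
 * common per-tfm setup.
 */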
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

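/* ->init hook for AEAD transforms; same pattern as caam_cra_init() */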
static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		 container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

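/*
 * Per-tfm teardown: unmap whichever shared descriptors and key the
 * setkey/descriptor-construction paths managed to DMA-map (each mapping
 * is checked individually), then release the job ring taken in
 * caam_init_common().
 */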
static void caam_exit_common(struct caam_ctx *ctx)
{
	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

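/*
 * Module unload: unregister every AEAD that made it through registration
 * (tracked via ->registered), then, if alg_list was ever initialized,
 * tear down the dynamically allocated legacy algorithm entries.
 */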
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

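/*
 * Build a crypto_alg from an ablkcipher/givcipher template: copy the
 * names, fill in the common fields, hook up the type-specific cra_type
 * and ops, and stash the CAAM descriptor header templates for later use
 * by caam_cra_init().
 */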
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;
	t_alg->caam.alg_op = template->alg_op;

	return t_alg;
}

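/* Fill in the base fields common to all AEAD algs before registration */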
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

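/*
 * Module init: locate the CAAM controller via the device tree, read the
 * CHA version/instantiation registers to learn which DES/AES/MD blocks
 * (and maximum digest size) this part implements, then register only
 * the algorithms the hardware can actually run.
 */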
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

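	/*
	 * Register the ablkcipher/givcipher templates, skipping any whose
	 * underlying cipher block is not instantiated on this device.
	 */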
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

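	/*
	 * AEAD entries name both a class 1 (cipher) and a class 2 (hash)
	 * algorithm, so both blocks are checked; GCM modes are additionally
	 * skipped on low-power (LP) AES blocks, and digest-based entries
	 * are bounded by the detected MD block's maximum digest size.
	 */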
	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");