drivers/crypto/marvell/hash.c
1/*
2 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
3 *
4 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
5 * Author: Arnaud Ebalard <arno@natisbad.org>
6 *
7 * This work is based on an initial version written by
8 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation.
13 */
14
15#include <crypto/md5.h>
16#include <crypto/sha.h>
17
18#include "cesa.h"
19
20struct mv_cesa_ahash_dma_iter {
21 struct mv_cesa_dma_iter base;
22 struct mv_cesa_sg_dma_iter src;
23};
24
25static inline void
26mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
27 struct ahash_request *req)
28{
29 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
30 unsigned int len = req->nbytes + creq->cache_ptr;
31
32 if (!creq->last_req)
33 len &= ~CESA_HASH_BLOCK_SIZE_MSK;
34
35 mv_cesa_req_dma_iter_init(&iter->base, len);
36 mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
37 iter->src.op_offset = creq->cache_ptr;
38}
39
40static inline bool
41mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
42{
43 iter->src.op_offset = 0;
44
45 return mv_cesa_req_dma_iter_next_op(&iter->base);
46}
47
48static inline int
49mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
50{
51 req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
52 &req->cache_dma);
53 if (!req->cache)
54 return -ENOMEM;
55
56 return 0;
57}
58
59static inline void
60mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
61{
62 if (!req->cache)
63 return;
64
65 dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
66 req->cache_dma);
67}
68
69static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
70 gfp_t flags)
71{
72 if (req->padding)
73 return 0;
74
75 req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
76 &req->padding_dma);
77 if (!req->padding)
78 return -ENOMEM;
79
80 return 0;
81}
82
83static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
84{
85 if (!req->padding)
86 return;
87
88 dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
89 req->padding_dma);
90 req->padding = NULL;
91}
92
93static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
94{
95 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
96
97 mv_cesa_ahash_dma_free_padding(&creq->req.dma);
98}
99
100static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
101{
102 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
103
104 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
105 mv_cesa_ahash_dma_free_cache(&creq->req.dma);
106 mv_cesa_dma_cleanup(&creq->base);
107}
108
109static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
110{
111 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
112
113 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
114 mv_cesa_ahash_dma_cleanup(req);
115}
116
117static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
118{
119 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
120
121 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
122 mv_cesa_ahash_dma_last_cleanup(req);
123}
124
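/*
 * MD5, SHA1 and SHA256 share the same padding layout: a 0x80 byte, zeroes
 * up to 56 bytes modulo the 64-byte block size, then the message length in
 * bits stored on 8 bytes (little endian for MD5, big endian for SHA, see
 * creq->algo_le). The helpers below compute the padding length and write
 * the padding into a caller-provided buffer.
 */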
125static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
126{
127 unsigned int index, padlen;
128
129 index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
130 padlen = (index < 56) ? (56 - index) : (64 + 56 - index);
131
132 return padlen;
133}
134
135static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
136{
137 unsigned int index, padlen;
138
139 buf[0] = 0x80;
140 /* Pad out to 56 mod 64 */
141 index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
142 padlen = mv_cesa_ahash_pad_len(creq);
143 memset(buf + 1, 0, padlen - 1);
144
145 if (creq->algo_le) {
146 __le64 bits = cpu_to_le64(creq->len << 3);
147 memcpy(buf + padlen, &bits, sizeof(bits));
148 } else {
149 __be64 bits = cpu_to_be64(creq->len << 3);
150 memcpy(buf + padlen, &bits, sizeof(bits));
151 }
152
153 return padlen + 8;
154}
155
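/*
 * Standard (non-TDMA) processing: the CPU copies the cached bytes and as
 * much new data as fits into the engine SRAM, programs the fragment mode
 * and length, then starts the accelerator; completion is signalled by the
 * CESA_SA_INT_ACCEL0_DONE interrupt.
 */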
156static void mv_cesa_ahash_std_step(struct ahash_request *req)
157{
158 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
159 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
160 struct mv_cesa_engine *engine = creq->base.engine;
161 struct mv_cesa_op_ctx *op;
162 unsigned int new_cache_ptr = 0;
163 u32 frag_mode;
164 size_t len;
165 unsigned int digsize;
166 int i;
167
168 mv_cesa_adjust_op(engine, &creq->op_tmpl);
169 memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
170
171 digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
172 for (i = 0; i < digsize / 4; i++)
173 writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
174
175 mv_cesa_adjust_op(engine, &creq->op_tmpl);
176 memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
177
178 if (creq->cache_ptr)
179 memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
180 creq->cache, creq->cache_ptr);
181
182 len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
183 CESA_SA_SRAM_PAYLOAD_SIZE);
184
185 if (!creq->last_req) {
186 new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
187 len &= ~CESA_HASH_BLOCK_SIZE_MSK;
188 }
189
190 if (len - creq->cache_ptr)
191 sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
192 engine->sram +
193 CESA_SA_DATA_SRAM_OFFSET +
194 creq->cache_ptr,
195 len - creq->cache_ptr,
196 sreq->offset);
197
198 op = &creq->op_tmpl;
199
200 frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;
201
202 if (creq->last_req && sreq->offset == req->nbytes &&
203 creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
204 if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
205 frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
206 else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
207 frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
208 }
209
210 if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
211 frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
212 if (len &&
213 creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
214 mv_cesa_set_mac_op_total_len(op, creq->len);
215 } else {
216 int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;
217
218 if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
219 len &= CESA_HASH_BLOCK_SIZE_MSK;
220 new_cache_ptr = 64 - trailerlen;
221 memcpy_fromio(creq->cache,
222 engine->sram +
223 CESA_SA_DATA_SRAM_OFFSET + len,
224 new_cache_ptr);
225 } else {
226 len += mv_cesa_ahash_pad_req(creq,
227 engine->sram + len +
228 CESA_SA_DATA_SRAM_OFFSET);
229 }
230
231 if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
232 frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
233 else
234 frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
235 }
236 }
237
238 mv_cesa_set_mac_op_frag_len(op, len);
239 mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);
240
241 /* FIXME: only update enc_len field */
242 memcpy_toio(engine->sram, op, sizeof(*op));
243
244 if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
245 mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
246 CESA_SA_DESC_CFG_FRAG_MSK);
247
248 creq->cache_ptr = new_cache_ptr;
249
250 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
251 writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
252 BUG_ON(readl(engine->regs + CESA_SA_CMD) &
253 CESA_SA_CMD_EN_CESA_SA_ACCL0);
254 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
255}
256
257static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
258{
259 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
260 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
261
262 if (sreq->offset < (req->nbytes - creq->cache_ptr))
263 return -EINPROGRESS;
264
265 return 0;
266}
267
268static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
269{
270 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
271 struct mv_cesa_req *basereq = &creq->base;
272
273 mv_cesa_dma_prepare(basereq, basereq->engine);
274}
275
276static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
277{
278 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
279 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
280
281 sreq->offset = 0;
282}
283
284static void mv_cesa_ahash_step(struct crypto_async_request *req)
285{
286 struct ahash_request *ahashreq = ahash_request_cast(req);
287 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
288
289 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
290 mv_cesa_dma_step(&creq->base);
291 else
292 mv_cesa_ahash_std_step(ahashreq);
293}
294
295static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
296{
297 struct ahash_request *ahashreq = ahash_request_cast(req);
298 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
299
300 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
301 return mv_cesa_dma_process(&creq->base, status);
302
303 return mv_cesa_ahash_std_process(ahashreq, status);
304}
305
306static void mv_cesa_ahash_complete(struct crypto_async_request *req)
307{
308 struct ahash_request *ahashreq = ahash_request_cast(req);
309 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
310 struct mv_cesa_engine *engine = creq->base.engine;
311 unsigned int digsize;
312 int i;
313
314 digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
315 for (i = 0; i < digsize / 4; i++)
316 creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
317
318 if (creq->last_req) {
319 /*
320 * Hardware's MD5 digest is in little endian format, but
321 * SHA in big endian format
322 */
323 if (creq->algo_le) {
324 __le32 *result = (void *)ahashreq->result;
325
326 for (i = 0; i < digsize / 4; i++)
327 result[i] = cpu_to_le32(creq->state[i]);
328 } else {
329 __be32 *result = (void *)ahashreq->result;
330
331 for (i = 0; i < digsize / 4; i++)
332 result[i] = cpu_to_be32(creq->state[i]);
333 }
334 }
335
336 atomic_sub(ahashreq->nbytes, &engine->load);
337}
338
339static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
340 struct mv_cesa_engine *engine)
341{
342 struct ahash_request *ahashreq = ahash_request_cast(req);
343 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
344
345 creq->base.engine = engine;
346
347 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
348 mv_cesa_ahash_dma_prepare(ahashreq);
349 else
350 mv_cesa_ahash_std_prepare(ahashreq);
351}
352
353static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
354{
355 struct ahash_request *ahashreq = ahash_request_cast(req);
356 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
357
358 if (creq->last_req)
359 mv_cesa_ahash_last_cleanup(ahashreq);
360
361 mv_cesa_ahash_cleanup(ahashreq);
362
363 if (creq->cache_ptr)
364 sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
365 creq->cache,
366 creq->cache_ptr,
367 ahashreq->nbytes - creq->cache_ptr);
368}
369
370static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
371 .step = mv_cesa_ahash_step,
372 .process = mv_cesa_ahash_process,
373 .cleanup = mv_cesa_ahash_req_cleanup,
374 .complete = mv_cesa_ahash_complete,
375};
376
377static void mv_cesa_ahash_init(struct ahash_request *req,
378 struct mv_cesa_op_ctx *tmpl, bool algo_le)
379{
380 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
381
382 memset(creq, 0, sizeof(*creq));
383 mv_cesa_update_op_cfg(tmpl,
384 CESA_SA_DESC_CFG_OP_MAC_ONLY |
385 CESA_SA_DESC_CFG_FIRST_FRAG,
386 CESA_SA_DESC_CFG_OP_MSK |
387 CESA_SA_DESC_CFG_FRAG_MSK);
388 mv_cesa_set_mac_op_total_len(tmpl, 0);
389 mv_cesa_set_mac_op_frag_len(tmpl, 0);
390 creq->op_tmpl = *tmpl;
391 creq->len = 0;
392 creq->algo_le = algo_le;
393}
394
395static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
396{
397 struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);
398
399 ctx->base.ops = &mv_cesa_ahash_req_ops;
400
401 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
402 sizeof(struct mv_cesa_ahash_req));
403 return 0;
404}
405
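/*
 * Requests smaller than a hash block cannot be processed by the engine on
 * their own: the data is accumulated in creq->cache until at least one
 * full block is available or the final request is issued.
 */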
406static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
407{
408 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
409 bool cached = false;
410
411 if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) {
412 cached = true;
413
414 if (!req->nbytes)
415 return cached;
416
417 sg_pcopy_to_buffer(req->src, creq->src_nents,
418 creq->cache + creq->cache_ptr,
419 req->nbytes, 0);
420
421 creq->cache_ptr += req->nbytes;
422 }
423
424 return cached;
425}
426
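/*
 * Emit an operation descriptor covering frag_len bytes followed by a dummy
 * "launch" descriptor, and switch the template from first to middle
 * fragment once the first fragment has been queued.
 */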
427static struct mv_cesa_op_ctx *
428mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
429 struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
430 gfp_t flags)
431{
432 struct mv_cesa_op_ctx *op;
433 int ret;
434
435 op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
436 if (IS_ERR(op))
437 return op;
438
439 /* Set the operation block fragment length. */
440 mv_cesa_set_mac_op_frag_len(op, frag_len);
441
442 /* Append dummy desc to launch operation */
443 ret = mv_cesa_dma_add_dummy_launch(chain, flags);
444 if (ret)
445 return ERR_PTR(ret);
446
447 if (mv_cesa_mac_op_is_first_frag(tmpl))
448 mv_cesa_update_op_cfg(tmpl,
449 CESA_SA_DESC_CFG_MID_FRAG,
450 CESA_SA_DESC_CFG_FRAG_MSK);
451
452 return op;
453}
454
455static int
456mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
457 struct mv_cesa_ahash_req *creq,
458 gfp_t flags)
459{
460 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
461 int ret;
462
463 if (!creq->cache_ptr)
464 return 0;
465
466 ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
467 if (ret)
468 return ret;
469
470 memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);
471
472 return mv_cesa_dma_add_data_transfer(chain,
473 CESA_SA_DATA_SRAM_OFFSET,
474 ahashdreq->cache_dma,
475 creq->cache_ptr,
476 CESA_TDMA_DST_IN_SRAM,
477 flags);
478}
479
480static struct mv_cesa_op_ctx *
481mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
482 struct mv_cesa_ahash_dma_iter *dma_iter,
483 struct mv_cesa_ahash_req *creq,
484 unsigned int frag_len, gfp_t flags)
485{
486 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
487 unsigned int len, trailerlen, padoff = 0;
488 struct mv_cesa_op_ctx *op;
489 int ret;
490
491 /*
492 * If the transfer is smaller than our maximum length, and we have
493 * some data outstanding, we can ask the engine to finish the hash.
494 */
495 if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
496 op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
497 flags);
498 if (IS_ERR(op))
499 return op;
500
501 mv_cesa_set_mac_op_total_len(op, creq->len);
502 mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
503 CESA_SA_DESC_CFG_NOT_FRAG :
504 CESA_SA_DESC_CFG_LAST_FRAG,
505 CESA_SA_DESC_CFG_FRAG_MSK);
506
507 return op;
508 }
509
510 /*
511 * The request is longer than the engine can handle, or we have
512 * no data outstanding. Manually generate the padding, adding it
513 * as a "mid" fragment.
514 */
515 ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
516 if (ret)
517 return ERR_PTR(ret);
518
519 trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);
520
521 len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
522 if (len) {
523 ret = mv_cesa_dma_add_data_transfer(chain,
524 CESA_SA_DATA_SRAM_OFFSET +
525 frag_len,
526 ahashdreq->padding_dma,
527 len, CESA_TDMA_DST_IN_SRAM,
528 flags);
529 if (ret)
530 return ERR_PTR(ret);
531
532 op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
533 flags);
534 if (IS_ERR(op))
535 return op;
536
537 if (len == trailerlen)
538 return op;
539
540 padoff += len;
541 }
542
543 ret = mv_cesa_dma_add_data_transfer(chain,
544 CESA_SA_DATA_SRAM_OFFSET,
545 ahashdreq->padding_dma +
546 padoff,
547 trailerlen - padoff,
548 CESA_TDMA_DST_IN_SRAM,
549 flags);
550 if (ret)
551 return ERR_PTR(ret);
552
553 return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
554 flags);
555}
556
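/*
 * TDMA processing: map the source scatterlist and build a descriptor chain
 * that copies the cached bytes and the new data into SRAM, interleaving an
 * operation + launch descriptor for every full SRAM payload, with the final
 * (padded) operation appended when this is the last request.
 */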
557static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
558{
559 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
560 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
561 GFP_KERNEL : GFP_ATOMIC;
562 struct mv_cesa_req *basereq = &creq->base;
563 struct mv_cesa_ahash_dma_iter iter;
564 struct mv_cesa_op_ctx *op = NULL;
e41bbebd 565 unsigned int frag_len;
566 int ret;
567
568 basereq->chain.first = NULL;
569 basereq->chain.last = NULL;
570
571 if (creq->src_nents) {
572 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
573 DMA_TO_DEVICE);
574 if (!ret) {
575 ret = -ENOMEM;
576 goto err;
577 }
578 }
579
580 mv_cesa_tdma_desc_iter_init(&basereq->chain);
581 mv_cesa_ahash_req_iter_init(&iter, req);
582
583 /*
584 * Add the cache (left-over data from a previous block) first.
585 * This will never overflow the SRAM size.
586 */
587 ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
588 if (ret)
589 goto err_free_tdma;
590
591 if (iter.src.sg) {
592 /*
593 * Add all the new data, inserting an operation block and
594 * launch command between each full SRAM block-worth of
595 * data. We intentionally do not add the final op block.
596 */
597 while (true) {
598 ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
599 &iter.base,
600 &iter.src, flags);
601 if (ret)
602 goto err_free_tdma;
603
604 frag_len = iter.base.op_len;
605
606 if (!mv_cesa_ahash_req_iter_next_op(&iter))
607 break;
608
609 op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
610 frag_len, flags);
611 if (IS_ERR(op)) {
612 ret = PTR_ERR(op);
613 goto err_free_tdma;
614 }
615 }
616 } else {
617 /* Account for the data that was in the cache. */
618 frag_len = iter.base.op_len;
619 }
620
621 /*
622 * At this point, frag_len indicates whether we have any data
623 * outstanding which needs an operation. Queue up the final
624 * operation, which depends whether this is the final request.
625 */
626 if (creq->last_req)
627 op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
628 frag_len, flags);
629 else if (frag_len)
630 op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
631 frag_len, flags);
632
633 if (IS_ERR(op)) {
634 ret = PTR_ERR(op);
635 goto err_free_tdma;
636 }
637
638 if (op) {
639 /* Add dummy desc to wait for crypto operation end */
640 ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
641 if (ret)
642 goto err_free_tdma;
643 }
644
645 if (!creq->last_req)
646 creq->cache_ptr = req->nbytes + creq->cache_ptr -
647 iter.base.len;
648 else
649 creq->cache_ptr = 0;
650
651 basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
652 CESA_TDMA_BREAK_CHAIN);
653
654 return 0;
655
656err_free_tdma:
657 mv_cesa_dma_cleanup(basereq);
658 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
659
660err:
661 mv_cesa_ahash_last_cleanup(req);
662
663 return ret;
664}
665
666static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
667{
668 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
669
670 creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
671 if (creq->src_nents < 0) {
672 dev_err(cesa_dev->dev, "Invalid number of src SG");
673 return creq->src_nents;
674 }
675
676 *cached = mv_cesa_ahash_cache_req(req);
677
678 if (*cached)
679 return 0;
680
681 if (cesa_dev->caps->has_tdma)
682 return mv_cesa_ahash_dma_req_init(req);
683 else
684 return 0;
685}
686
687static int mv_cesa_ahash_queue_req(struct ahash_request *req)
688{
689 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
690 struct mv_cesa_engine *engine;
691 bool cached = false;
692 int ret;
693
694 ret = mv_cesa_ahash_req_init(req, &cached);
695 if (ret)
696 return ret;
697
698 if (cached)
699 return 0;
700
701 engine = mv_cesa_select_engine(req->nbytes);
702 mv_cesa_ahash_prepare(&req->base, engine);
703
704 ret = mv_cesa_queue_req(&req->base, &creq->base);
705
706 if (mv_cesa_req_needs_cleanup(&req->base, ret))
707 mv_cesa_ahash_cleanup(req);
708
709 return ret;
710}
711
712static int mv_cesa_ahash_update(struct ahash_request *req)
713{
714 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
715
716 creq->len += req->nbytes;
717
718 return mv_cesa_ahash_queue_req(req);
719}
720
721static int mv_cesa_ahash_final(struct ahash_request *req)
722{
723 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
724 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
725
726 mv_cesa_set_mac_op_total_len(tmpl, creq->len);
727 creq->last_req = true;
728 req->nbytes = 0;
729
730 return mv_cesa_ahash_queue_req(req);
731}
732
733static int mv_cesa_ahash_finup(struct ahash_request *req)
734{
735 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
736 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
737
738 creq->len += req->nbytes;
739 mv_cesa_set_mac_op_total_len(tmpl, creq->len);
740 creq->last_req = true;
741
742 return mv_cesa_ahash_queue_req(req);
743}
744
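/*
 * Export/import helpers: the saved state is the byte count, the partial
 * digest and the bytes still pending in the cache, which is enough to
 * resume hashing from a later request.
 */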
745static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
746 u64 *len, void *cache)
747{
748 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
749 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
750 unsigned int digsize = crypto_ahash_digestsize(ahash);
751 unsigned int blocksize;
752
753 blocksize = crypto_ahash_blocksize(ahash);
754
755 *len = creq->len;
756 memcpy(hash, creq->state, digsize);
757 memset(cache, 0, blocksize);
758 memcpy(cache, creq->cache, creq->cache_ptr);
759
760 return 0;
761}
762
763static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
764 u64 len, const void *cache)
765{
766 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
767 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
768 unsigned int digsize = crypto_ahash_digestsize(ahash);
769 unsigned int blocksize;
770 unsigned int cache_ptr;
771 int ret;
772
773 ret = crypto_ahash_init(req);
774 if (ret)
775 return ret;
776
777 blocksize = crypto_ahash_blocksize(ahash);
778 if (len >= blocksize)
779 mv_cesa_update_op_cfg(&creq->op_tmpl,
780 CESA_SA_DESC_CFG_MID_FRAG,
781 CESA_SA_DESC_CFG_FRAG_MSK);
782
783 creq->len = len;
784 memcpy(creq->state, hash, digsize);
785 creq->cache_ptr = 0;
786
787 cache_ptr = do_div(len, blocksize);
788 if (!cache_ptr)
789 return 0;
790
791 memcpy(creq->cache, cache, cache_ptr);
792 creq->cache_ptr = cache_ptr;
793
794 return 0;
795}
796
797static int mv_cesa_md5_init(struct ahash_request *req)
798{
799 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
800 struct mv_cesa_op_ctx tmpl = { };
801
802 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
803
804 mv_cesa_ahash_init(req, &tmpl, true);
805
806 creq->state[0] = MD5_H0;
807 creq->state[1] = MD5_H1;
808 creq->state[2] = MD5_H2;
809 creq->state[3] = MD5_H3;
810
811 return 0;
812}
813
814static int mv_cesa_md5_export(struct ahash_request *req, void *out)
815{
816 struct md5_state *out_state = out;
817
818 return mv_cesa_ahash_export(req, out_state->hash,
819 &out_state->byte_count, out_state->block);
820}
821
822static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
823{
824 const struct md5_state *in_state = in;
825
826 return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
827 in_state->block);
828}
829
830static int mv_cesa_md5_digest(struct ahash_request *req)
831{
832 int ret;
833
834 ret = mv_cesa_md5_init(req);
835 if (ret)
836 return ret;
837
838 return mv_cesa_ahash_finup(req);
839}
840
841struct ahash_alg mv_md5_alg = {
842 .init = mv_cesa_md5_init,
843 .update = mv_cesa_ahash_update,
844 .final = mv_cesa_ahash_final,
845 .finup = mv_cesa_ahash_finup,
846 .digest = mv_cesa_md5_digest,
847 .export = mv_cesa_md5_export,
848 .import = mv_cesa_md5_import,
849 .halg = {
850 .digestsize = MD5_DIGEST_SIZE,
851 .statesize = sizeof(struct md5_state),
852 .base = {
853 .cra_name = "md5",
854 .cra_driver_name = "mv-md5",
855 .cra_priority = 300,
856 .cra_flags = CRYPTO_ALG_ASYNC |
857 CRYPTO_ALG_KERN_DRIVER_ONLY,
858 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
859 .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
860 .cra_init = mv_cesa_ahash_cra_init,
861 .cra_module = THIS_MODULE,
862 }
863 }
864};
865
866static int mv_cesa_sha1_init(struct ahash_request *req)
867{
868 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
869 struct mv_cesa_op_ctx tmpl = { };
870
871 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
872
873 mv_cesa_ahash_init(req, &tmpl, false);
874
875 creq->state[0] = SHA1_H0;
876 creq->state[1] = SHA1_H1;
877 creq->state[2] = SHA1_H2;
878 creq->state[3] = SHA1_H3;
879 creq->state[4] = SHA1_H4;
880
881 return 0;
882}
883
884static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
885{
886 struct sha1_state *out_state = out;
887
888 return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
889 out_state->buffer);
890}
891
892static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
893{
894 const struct sha1_state *in_state = in;
895
896 return mv_cesa_ahash_import(req, in_state->state, in_state->count,
897 in_state->buffer);
898}
899
900static int mv_cesa_sha1_digest(struct ahash_request *req)
901{
902 int ret;
903
904 ret = mv_cesa_sha1_init(req);
905 if (ret)
906 return ret;
907
908 return mv_cesa_ahash_finup(req);
909}
910
911struct ahash_alg mv_sha1_alg = {
912 .init = mv_cesa_sha1_init,
913 .update = mv_cesa_ahash_update,
914 .final = mv_cesa_ahash_final,
915 .finup = mv_cesa_ahash_finup,
916 .digest = mv_cesa_sha1_digest,
917 .export = mv_cesa_sha1_export,
918 .import = mv_cesa_sha1_import,
919 .halg = {
920 .digestsize = SHA1_DIGEST_SIZE,
921 .statesize = sizeof(struct sha1_state),
922 .base = {
923 .cra_name = "sha1",
924 .cra_driver_name = "mv-sha1",
925 .cra_priority = 300,
926 .cra_flags = CRYPTO_ALG_ASYNC |
927 CRYPTO_ALG_KERN_DRIVER_ONLY,
928 .cra_blocksize = SHA1_BLOCK_SIZE,
929 .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
930 .cra_init = mv_cesa_ahash_cra_init,
931 .cra_module = THIS_MODULE,
932 }
933 }
934};
935
936static int mv_cesa_sha256_init(struct ahash_request *req)
937{
938 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
939 struct mv_cesa_op_ctx tmpl = { };
940
941 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
942
943 mv_cesa_ahash_init(req, &tmpl, false);
944
945 creq->state[0] = SHA256_H0;
946 creq->state[1] = SHA256_H1;
947 creq->state[2] = SHA256_H2;
948 creq->state[3] = SHA256_H3;
949 creq->state[4] = SHA256_H4;
950 creq->state[5] = SHA256_H5;
951 creq->state[6] = SHA256_H6;
952 creq->state[7] = SHA256_H7;
953
954 return 0;
955}
956
957static int mv_cesa_sha256_digest(struct ahash_request *req)
958{
959 int ret;
960
961 ret = mv_cesa_sha256_init(req);
962 if (ret)
963 return ret;
964
965 return mv_cesa_ahash_finup(req);
966}
967
968static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
969{
970 struct sha256_state *out_state = out;
971
972 return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
973 out_state->buf);
974}
975
976static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
977{
978 const struct sha256_state *in_state = in;
979
980 return mv_cesa_ahash_import(req, in_state->state, in_state->count,
981 in_state->buf);
982}
983
984struct ahash_alg mv_sha256_alg = {
985 .init = mv_cesa_sha256_init,
986 .update = mv_cesa_ahash_update,
987 .final = mv_cesa_ahash_final,
988 .finup = mv_cesa_ahash_finup,
989 .digest = mv_cesa_sha256_digest,
990 .export = mv_cesa_sha256_export,
991 .import = mv_cesa_sha256_import,
992 .halg = {
993 .digestsize = SHA256_DIGEST_SIZE,
994 .statesize = sizeof(struct sha256_state),
995 .base = {
996 .cra_name = "sha256",
997 .cra_driver_name = "mv-sha256",
998 .cra_priority = 300,
999 .cra_flags = CRYPTO_ALG_ASYNC |
1000 CRYPTO_ALG_KERN_DRIVER_ONLY,
1001 .cra_blocksize = SHA256_BLOCK_SIZE,
1002 .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
1003 .cra_init = mv_cesa_ahash_cra_init,
1004 .cra_module = THIS_MODULE,
1005 }
1006 }
1007};
1008
1009struct mv_cesa_ahash_result {
1010 struct completion completion;
1011 int error;
1012};
1013
1014static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
1015 int error)
1016{
1017 struct mv_cesa_ahash_result *result = req->data;
1018
1019 if (error == -EINPROGRESS)
1020 return;
1021
1022 result->error = error;
1023 complete(&result->completion);
1024}
1025
1026static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
1027 void *state, unsigned int blocksize)
1028{
1029 struct mv_cesa_ahash_result result;
1030 struct scatterlist sg;
1031 int ret;
1032
1033 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1034 mv_cesa_hmac_ahash_complete, &result);
1035 sg_init_one(&sg, pad, blocksize);
1036 ahash_request_set_crypt(req, &sg, pad, blocksize);
1037 init_completion(&result.completion);
1038
1039 ret = crypto_ahash_init(req);
1040 if (ret)
1041 return ret;
1042
1043 ret = crypto_ahash_update(req);
1044 if (ret && ret != -EINPROGRESS)
1045 return ret;
1046
1047 wait_for_completion_interruptible(&result.completion);
1048 if (result.error)
1049 return result.error;
1050
1051 ret = crypto_ahash_export(req, state);
1052 if (ret)
1053 return ret;
1054
1055 return 0;
1056}
1057
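/*
 * HMAC key pre-processing (RFC 2104): keys longer than a block are first
 * hashed, the result is zero-padded to the block size, and the inner and
 * outer pads are obtained by XORing with 0x36 and 0x5c respectively.
 */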
1058static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
1059 const u8 *key, unsigned int keylen,
1060 u8 *ipad, u8 *opad,
1061 unsigned int blocksize)
1062{
1063 struct mv_cesa_ahash_result result;
1064 struct scatterlist sg;
1065 int ret;
1066 int i;
1067
1068 if (keylen <= blocksize) {
1069 memcpy(ipad, key, keylen);
1070 } else {
1071 u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);
1072
1073 if (!keydup)
1074 return -ENOMEM;
1075
1076 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1077 mv_cesa_hmac_ahash_complete,
1078 &result);
1079 sg_init_one(&sg, keydup, keylen);
1080 ahash_request_set_crypt(req, &sg, ipad, keylen);
1081 init_completion(&result.completion);
1082
1083 ret = crypto_ahash_digest(req);
1084 if (ret == -EINPROGRESS) {
1085 wait_for_completion_interruptible(&result.completion);
1086 ret = result.error;
1087 }
1088
1089 /* Set the memory region to 0 to avoid any leak. */
1090 memset(keydup, 0, keylen);
1091 kfree(keydup);
1092
1093 if (ret)
1094 return ret;
1095
1096 keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
1097 }
1098
1099 memset(ipad + keylen, 0, blocksize - keylen);
1100 memcpy(opad, ipad, blocksize);
1101
1102 for (i = 0; i < blocksize; i++) {
1103 ipad[i] ^= 0x36;
1104 opad[i] ^= 0x5c;
1105 }
1106
1107 return 0;
1108}
1109
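/*
 * Compute the partial hash states of the inner and outer pads using the
 * driver's own ahash implementation (looked up by driver name); the
 * per-algorithm setkey callers then store those states in ctx->iv so they
 * can be loaded as initial values when an HMAC request is processed.
 */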
1110static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
1111 const u8 *key, unsigned int keylen,
1112 void *istate, void *ostate)
1113{
1114 struct ahash_request *req;
1115 struct crypto_ahash *tfm;
1116 unsigned int blocksize;
1117 u8 *ipad = NULL;
1118 u8 *opad;
1119 int ret;
1120
1121 tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
1122 CRYPTO_ALG_TYPE_AHASH_MASK);
1123 if (IS_ERR(tfm))
1124 return PTR_ERR(tfm);
1125
1126 req = ahash_request_alloc(tfm, GFP_KERNEL);
1127 if (!req) {
1128 ret = -ENOMEM;
1129 goto free_ahash;
1130 }
1131
1132 crypto_ahash_clear_flags(tfm, ~0);
1133
1134 blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1135
1136 ipad = kzalloc(2 * blocksize, GFP_KERNEL);
1137 if (!ipad) {
1138 ret = -ENOMEM;
1139 goto free_req;
1140 }
1141
1142 opad = ipad + blocksize;
1143
1144 ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
1145 if (ret)
1146 goto free_ipad;
1147
1148 ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
1149 if (ret)
1150 goto free_ipad;
1151
1152 ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);
1153
1154free_ipad:
1155 kfree(ipad);
1156free_req:
1157 ahash_request_free(req);
1158free_ahash:
1159 crypto_free_ahash(tfm);
1160
1161 return ret;
1162}
1163
1164static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
1165{
1166 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);
1167
1168 ctx->base.ops = &mv_cesa_ahash_req_ops;
1169
1170 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1171 sizeof(struct mv_cesa_ahash_req));
1172 return 0;
1173}
1174
1175static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
1176{
1177 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1178 struct mv_cesa_op_ctx tmpl = { };
1179
1180 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
1181 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1182
1183 mv_cesa_ahash_init(req, &tmpl, true);
1184
1185 return 0;
1186}
1187
1188static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
1189 unsigned int keylen)
1190{
1191 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1192 struct md5_state istate, ostate;
1193 int ret, i;
1194
1195 ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
1196 if (ret)
1197 return ret;
1198
1199 for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
1200 ctx->iv[i] = be32_to_cpu(istate.hash[i]);
1201
1202 for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
1203 ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);
1204
1205 return 0;
1206}
1207
1208static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
1209{
1210 int ret;
1211
1212 ret = mv_cesa_ahmac_md5_init(req);
1213 if (ret)
1214 return ret;
1215
1216 return mv_cesa_ahash_finup(req);
1217}
1218
1219struct ahash_alg mv_ahmac_md5_alg = {
1220 .init = mv_cesa_ahmac_md5_init,
1221 .update = mv_cesa_ahash_update,
1222 .final = mv_cesa_ahash_final,
1223 .finup = mv_cesa_ahash_finup,
1224 .digest = mv_cesa_ahmac_md5_digest,
1225 .setkey = mv_cesa_ahmac_md5_setkey,
1226 .export = mv_cesa_md5_export,
1227 .import = mv_cesa_md5_import,
1228 .halg = {
1229 .digestsize = MD5_DIGEST_SIZE,
1230 .statesize = sizeof(struct md5_state),
1231 .base = {
1232 .cra_name = "hmac(md5)",
1233 .cra_driver_name = "mv-hmac-md5",
1234 .cra_priority = 300,
1235 .cra_flags = CRYPTO_ALG_ASYNC |
1236 CRYPTO_ALG_KERN_DRIVER_ONLY,
1237 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1238 .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
1239 .cra_init = mv_cesa_ahmac_cra_init,
1240 .cra_module = THIS_MODULE,
1241 }
1242 }
1243};
1244
1245static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
1246{
1247 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1248 struct mv_cesa_op_ctx tmpl = { };
1249
1250 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
1251 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1252
1253 mv_cesa_ahash_init(req, &tmpl, false);
1254
1255 return 0;
1256}
1257
1258static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
1259 unsigned int keylen)
1260{
1261 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1262 struct sha1_state istate, ostate;
1263 int ret, i;
1264
1265 ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
1266 if (ret)
1267 return ret;
1268
1269 for (i = 0; i < ARRAY_SIZE(istate.state); i++)
1270 ctx->iv[i] = be32_to_cpu(istate.state[i]);
1271
1272 for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
1273 ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);
1274
1275 return 0;
1276}
1277
1278static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
1279{
1280 int ret;
1281
1282 ret = mv_cesa_ahmac_sha1_init(req);
1283 if (ret)
1284 return ret;
1285
1286 return mv_cesa_ahash_finup(req);
1287}
1288
1289struct ahash_alg mv_ahmac_sha1_alg = {
1290 .init = mv_cesa_ahmac_sha1_init,
1291 .update = mv_cesa_ahash_update,
1292 .final = mv_cesa_ahash_final,
1293 .finup = mv_cesa_ahash_finup,
1294 .digest = mv_cesa_ahmac_sha1_digest,
1295 .setkey = mv_cesa_ahmac_sha1_setkey,
1296 .export = mv_cesa_sha1_export,
1297 .import = mv_cesa_sha1_import,
1298 .halg = {
1299 .digestsize = SHA1_DIGEST_SIZE,
1300 .statesize = sizeof(struct sha1_state),
1301 .base = {
1302 .cra_name = "hmac(sha1)",
1303 .cra_driver_name = "mv-hmac-sha1",
1304 .cra_priority = 300,
1305 .cra_flags = CRYPTO_ALG_ASYNC |
1306 CRYPTO_ALG_KERN_DRIVER_ONLY,
1307 .cra_blocksize = SHA1_BLOCK_SIZE,
1308 .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
1309 .cra_init = mv_cesa_ahmac_cra_init,
1310 .cra_module = THIS_MODULE,
1311 }
1312 }
1313};
1314
1315static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
1316 unsigned int keylen)
1317{
1318 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1319 struct sha256_state istate, ostate;
1320 int ret, i;
1321
1322 ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
1323 if (ret)
1324 return ret;
1325
1326 for (i = 0; i < ARRAY_SIZE(istate.state); i++)
1327 ctx->iv[i] = be32_to_cpu(istate.state[i]);
1328
1329 for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
1330 ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);
1331
1332 return 0;
1333}
1334
1335static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
1336{
1337 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1338 struct mv_cesa_op_ctx tmpl = { };
1339
1340 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
1341 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1342
1343 mv_cesa_ahash_init(req, &tmpl, false);
1344
1345 return 0;
1346}
1347
1348static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
1349{
1350 int ret;
1351
1352 ret = mv_cesa_ahmac_sha256_init(req);
1353 if (ret)
1354 return ret;
1355
1356 return mv_cesa_ahash_finup(req);
1357}
1358
1359struct ahash_alg mv_ahmac_sha256_alg = {
1360 .init = mv_cesa_ahmac_sha256_init,
1361 .update = mv_cesa_ahash_update,
1362 .final = mv_cesa_ahash_final,
1363 .finup = mv_cesa_ahash_finup,
1364 .digest = mv_cesa_ahmac_sha256_digest,
1365 .setkey = mv_cesa_ahmac_sha256_setkey,
1366 .export = mv_cesa_sha256_export,
1367 .import = mv_cesa_sha256_import,
1368 .halg = {
1369 .digestsize = SHA256_DIGEST_SIZE,
1370 .statesize = sizeof(struct sha256_state),
1371 .base = {
1372 .cra_name = "hmac(sha256)",
1373 .cra_driver_name = "mv-hmac-sha256",
1374 .cra_priority = 300,
1375 .cra_flags = CRYPTO_ALG_ASYNC |
1376 CRYPTO_ALG_KERN_DRIVER_ONLY,
1377 .cra_blocksize = SHA256_BLOCK_SIZE,
1378 .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
1379 .cra_init = mv_cesa_ahmac_cra_init,
1380 .cra_module = THIS_MODULE,
1381 }
1382 }
1383};