/*
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#define SHA1_MD5_BLOCK_SIZE	SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE		16

#define DST_MAXBURST		16
#define DMA_MIN			(DST_MAXBURST * sizeof(u32))

#define SHA_REG_IDIGEST(dd, x)	((dd)->pdata->idigest_ofs + ((x) * 0x04))
#define SHA_REG_DIN(dd, x)	((dd)->pdata->din_ofs + ((x) * 0x04))
#define SHA_REG_DIGCNT(dd)	((dd)->pdata->digcnt_ofs)

#define SHA_REG_ODIGEST(x)	(0x00 + ((x) * 0x04))

#define SHA_REG_CTRL		0x18
#define SHA_REG_CTRL_LENGTH	(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH	(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST	(1 << 3)
#define SHA_REG_CTRL_ALGO	(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV(dd)		((dd)->pdata->rev_ofs)

#define SHA_REG_MASK(dd)	((dd)->pdata->mask_ofs)
#define SHA_REG_MASK_DMA_EN	(1 << 3)
#define SHA_REG_MASK_IT_EN	(1 << 2)
#define SHA_REG_MASK_SOFTRESET	(1 << 1)
#define SHA_REG_AUTOIDLE	(1 << 0)

#define SHA_REG_SYSSTATUS(dd)	((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define SHA_REG_MODE		0x44
#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)
#define SHA_REG_MODE_ALGO_MASK		(3 << 1)
#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)

#define SHA_REG_LENGTH		0x48

#define SHA_REG_IRQSTATUS	0x118
#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
#define SHA_REG_IRQSTATUS_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)

#define SHA_REG_IRQENA		0x11C
#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

/* mostly device flags */
#define FLAGS_BUSY		0
#define FLAGS_FINAL		1
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_INIT		4
#define FLAGS_CPU		5
#define FLAGS_DMA_READY		6
#define FLAGS_AUTO_XOR		7
#define FLAGS_BE32_SHA1		8
/* context flags */
#define FLAGS_FINUP		16
#define FLAGS_SG		17

#define FLAGS_MODE_SHIFT	18
#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK << (FLAGS_MODE_SHIFT - 1))
#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << (FLAGS_MODE_SHIFT - 1))
#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << (FLAGS_MODE_SHIFT - 1))
#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << (FLAGS_MODE_SHIFT - 1))
#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << (FLAGS_MODE_SHIFT - 1))
#define FLAGS_HMAC		20
#define FLAGS_ERROR		21

#define OP_UPDATE		1
#define OP_FINAL		2

#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN			PAGE_SIZE

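/*
 * Bits below FLAGS_FINUP describe device state and live in dd->flags (busy,
 * DMA active, output ready, ...); FLAGS_FINUP and above are per-request/tfm
 * state kept in ctx->flags and tctx->flags. The FLAGS_MODE_* values mirror
 * the algorithm encoding of SHA_REG_MODE so the OMAP4 control path can write
 * them out directly.
 */
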
struct omap_sham_dev;

struct omap_sham_reqctx {
        struct omap_sham_dev	*dd;
        unsigned long		flags;
        unsigned long		op;

        u8			digest[SHA256_DIGEST_SIZE] OMAP_ALIGNED;
        size_t			digcnt;
        size_t			bufcnt;
        size_t			buflen;
        dma_addr_t		dma_addr;

        /* walk state */
        struct scatterlist	*sg;
        struct scatterlist	sgl;
        unsigned int		offset;	/* offset in current sg */
        unsigned int		total;	/* total request */

        u8			buffer[0] OMAP_ALIGNED;
};

struct omap_sham_hmac_ctx {
        struct crypto_shash	*shash;
        u8			ipad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
        u8			opad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
};

struct omap_sham_ctx {
        struct omap_sham_dev	*dd;

        unsigned long		flags;

        /* fallback stuff */
        struct crypto_shash	*fallback;

        struct omap_sham_hmac_ctx base[0];
};

#define OMAP_SHAM_QUEUE_LENGTH	1

struct omap_sham_algs_info {
        struct ahash_alg	*algs_list;
        unsigned int		size;
        unsigned int		registered;
};

struct omap_sham_pdata {
        struct omap_sham_algs_info	*algs_info;
        unsigned int	algs_info_size;
        unsigned long	flags;
        int		digest_size;

        void		(*copy_hash)(struct ahash_request *req, int out);
        void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
                                      int final, int dma);
        void		(*trigger)(struct omap_sham_dev *dd, size_t length);
        int		(*poll_irq)(struct omap_sham_dev *dd);
        irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);

        u32		idigest_ofs;
        u32		din_ofs;
        u32		digcnt_ofs;
        u32		rev_ofs;
        u32		mask_ofs;
        u32		sysstatus_ofs;

        u32		major_mask;
        u32		major_shift;
        u32		minor_mask;
        u32		minor_shift;
};

struct omap_sham_dev {
        struct list_head	list;
        unsigned long		phys_base;
        struct device		*dev;
        void __iomem		*io_base;
        int			irq;
        spinlock_t		lock;
        int			err;
        unsigned int		dma;
        struct dma_chan		*dma_lch;
        struct tasklet_struct	done_task;

        unsigned long		flags;
        struct crypto_queue	queue;
        struct ahash_request	*req;

        const struct omap_sham_pdata	*pdata;
};

struct omap_sham_drv {
        struct list_head	dev_list;
        spinlock_t		lock;
};

static struct omap_sham_drv sham = {
        .dev_list = LIST_HEAD_INIT(sham.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};

static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
        return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
                                   u32 offset, u32 value)
{
        __raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
                                        u32 value, u32 mask)
{
        u32 val;

        val = omap_sham_read(dd, address);
        val &= ~mask;
        val |= value;
        omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
        unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

        while (!(omap_sham_read(dd, offset) & bit)) {
                if (time_is_before_jiffies(timeout))
                        return -ETIMEDOUT;
        }

        return 0;
}

static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        struct omap_sham_dev *dd = ctx->dd;
        u32 *hash = (u32 *)ctx->digest;
        int i;

        for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
                if (out)
                        hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
                else
                        omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
        }
}

static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        struct omap_sham_dev *dd = ctx->dd;
        int i;

        if (ctx->flags & BIT(FLAGS_HMAC)) {
                struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
                struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
                struct omap_sham_hmac_ctx *bctx = tctx->base;
                u32 *opad = (u32 *)bctx->opad;

                for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
                        if (out)
                                opad[i] = omap_sham_read(dd,
                                                         SHA_REG_ODIGEST(i));
                        else
                                omap_sham_write(dd, SHA_REG_ODIGEST(i),
                                                opad[i]);
                }
        }

        omap_sham_copy_hash_omap2(req, out);
}

static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        u32 *in = (u32 *)ctx->digest;
        u32 *hash = (u32 *)req->result;
        int i, d, big_endian = 0;

        switch (ctx->flags & FLAGS_MODE_MASK) {
        case FLAGS_MODE_MD5:
                d = MD5_DIGEST_SIZE / sizeof(u32);
                break;
        case FLAGS_MODE_SHA1:
                /* OMAP2 SHA1 is big endian */
                if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
                        big_endian = 1;
                d = SHA1_DIGEST_SIZE / sizeof(u32);
                break;
        case FLAGS_MODE_SHA224:
                d = SHA224_DIGEST_SIZE / sizeof(u32);
                break;
        case FLAGS_MODE_SHA256:
                d = SHA256_DIGEST_SIZE / sizeof(u32);
                break;
        default:
                d = 0;
        }

        if (big_endian)
                for (i = 0; i < d; i++)
                        hash[i] = be32_to_cpu(in[i]);
        else
                for (i = 0; i < d; i++)
                        hash[i] = le32_to_cpu(in[i]);
}

static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
        pm_runtime_get_sync(dd->dev);

        if (!test_bit(FLAGS_INIT, &dd->flags)) {
                set_bit(FLAGS_INIT, &dd->flags);
                dd->err = 0;
        }

        return 0;
}

static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
                                       int final, int dma)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        u32 val = length << 5, mask;

        if (likely(ctx->digcnt))
                omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

        omap_sham_write_mask(dd, SHA_REG_MASK(dd),
                SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
                SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
        /*
         * Setting ALGO_CONST only for the first iteration
         * and CLOSE_HASH only for the last one.
         */
        if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
                val |= SHA_REG_CTRL_ALGO;
        if (!ctx->digcnt)
                val |= SHA_REG_CTRL_ALGO_CONST;
        if (final)
                val |= SHA_REG_CTRL_CLOSE_HASH;

        mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
               SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

        omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}

static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
{
        /* intentionally empty: OMAP2 has no separate length/trigger register */
}

static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
{
        return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
}

static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
                              u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                omap_sham_write(dd, offset, *value);
}

static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
                                       int final, int dma)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        u32 val, mask;

        /*
         * Setting ALGO_CONST only for the first iteration and
         * CLOSE_HASH only for the last one. Note that flags mode bits
         * correspond to algorithm encoding in mode register.
         */
        val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1);
        if (!ctx->digcnt) {
                struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
                struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
                struct omap_sham_hmac_ctx *bctx = tctx->base;

                val |= SHA_REG_MODE_ALGO_CONSTANT;

                if (ctx->flags & BIT(FLAGS_HMAC)) {
                        val |= SHA_REG_MODE_HMAC_KEY_PROC;
                        omap_sham_write_n(dd, SHA_REG_ODIGEST(0),
                                          (u32 *)bctx->ipad,
                                          SHA1_BLOCK_SIZE / sizeof(u32));
                        ctx->digcnt += SHA1_BLOCK_SIZE;
                }
        }

        if (final) {
                val |= SHA_REG_MODE_CLOSE_HASH;

                if (ctx->flags & BIT(FLAGS_HMAC))
                        val |= SHA_REG_MODE_HMAC_OUTER_HASH;
        }

        mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
               SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
               SHA_REG_MODE_HMAC_KEY_PROC;

        dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
        omap_sham_write_mask(dd, SHA_REG_MODE, val, mask);
        omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
        omap_sham_write_mask(dd, SHA_REG_MASK(dd),
                             SHA_REG_MASK_IT_EN |
                                     (dma ? SHA_REG_MASK_DMA_EN : 0),
                             SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
}

static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
{
        omap_sham_write(dd, SHA_REG_LENGTH, length);
}

static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
{
        return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
                              SHA_REG_IRQSTATUS_INPUT_RDY);
}

static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
                              size_t length, int final)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        int count, len32;
        const u32 *buffer = (const u32 *)buf;

        dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
                ctx->digcnt, length, final);

        dd->pdata->write_ctrl(dd, length, final, 0);
        dd->pdata->trigger(dd, length);

        /* should be non-zero before next lines to disable clocks later */
        ctx->digcnt += length;

        if (dd->pdata->poll_irq(dd))
                return -ETIMEDOUT;

        if (final)
                set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

        set_bit(FLAGS_CPU, &dd->flags);

        len32 = DIV_ROUND_UP(length, sizeof(u32));

        for (count = 0; count < len32; count++)
                omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]);

        return -EINPROGRESS;
}

static void omap_sham_dma_callback(void *param)
{
        struct omap_sham_dev *dd = param;

        set_bit(FLAGS_DMA_READY, &dd->flags);
        tasklet_schedule(&dd->done_task);
}

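/*
 * DMA transmit path: the data (either the aligned context buffer or the
 * current scatterlist entry) is streamed into the DATA_IN FIFO by a slave
 * dmaengine channel configured for 4-byte accesses with a DST_MAXBURST
 * burst; write_ctrl()/trigger() program length and mode, and completion is
 * reported through omap_sham_dma_callback() and the done_task tasklet.
 */
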
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
                              size_t length, int final, int is_sg)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        struct dma_async_tx_descriptor *tx;
        struct dma_slave_config cfg;
        int len32, ret;

        dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
                ctx->digcnt, length, final);

        memset(&cfg, 0, sizeof(cfg));

        cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.dst_maxburst = DST_MAXBURST;

        ret = dmaengine_slave_config(dd->dma_lch, &cfg);
        if (ret) {
                pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
                return ret;
        }

        len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN;

        if (is_sg) {
                /*
                 * The SG entry passed in may not have the 'length' member
                 * set correctly so use a local SG entry (sgl) with the
                 * proper value for 'length' instead.  If this is not done,
                 * the dmaengine may try to DMA the incorrect amount of data.
                 */
                sg_init_table(&ctx->sgl, 1);
                ctx->sgl.page_link = ctx->sg->page_link;
                ctx->sgl.offset = ctx->sg->offset;
                sg_dma_len(&ctx->sgl) = len32;
                sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);

                tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        } else {
                tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (!tx) {
                dev_err(dd->dev, "prep_slave_sg/single() failed\n");
                return -EINVAL;
        }

        tx->callback = omap_sham_dma_callback;
        tx->callback_param = dd;

        dd->pdata->write_ctrl(dd, length, final, 1);

        ctx->digcnt += length;

        if (final)
                set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

        set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

        dmaengine_submit(tx);
        dma_async_issue_pending(dd->dma_lch);

        dd->pdata->trigger(dd, length);

        return -EINPROGRESS;
}

static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
                                      const u8 *data, size_t length)
{
        size_t count = min(length, ctx->buflen - ctx->bufcnt);

        count = min(count, ctx->total);
        if (count <= 0)
                return 0;
        memcpy(ctx->buffer + ctx->bufcnt, data, count);
        ctx->bufcnt += count;

        return count;
}

static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
        size_t count;

        while (ctx->sg) {
                count = omap_sham_append_buffer(ctx,
                                sg_virt(ctx->sg) + ctx->offset,
                                ctx->sg->length - ctx->offset);
                if (!count)
                        break;
                ctx->offset += count;
                ctx->total -= count;
                if (ctx->offset == ctx->sg->length) {
                        ctx->sg = sg_next(ctx->sg);
                        if (ctx->sg)
                                ctx->offset = 0;
                        else
                                ctx->total = 0;
                }
        }

        return 0;
}

static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
                                  struct omap_sham_reqctx *ctx,
                                  size_t length, int final)
{
        int ret;

        ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
                                       DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
                dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
                return -EINVAL;
        }

        ctx->flags &= ~BIT(FLAGS_SG);

        ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
        if (ret != -EINPROGRESS)
                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
                                 DMA_TO_DEVICE);

        return ret;
}

static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        unsigned int final;
        size_t count;

        omap_sham_append_sg(ctx);

        final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

        dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
                ctx->bufcnt, ctx->digcnt, final);

        if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
                count = ctx->bufcnt;
                ctx->bufcnt = 0;
                return omap_sham_xmit_dma_map(dd, ctx, count, final);
        }

        return 0;
}

/* Start address alignment */
#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
/* SHA1 block size alignment */
#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))

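/*
 * Fast path: hand the current scatterlist entry directly to the DMA engine
 * when nothing is buffered yet, the transfer is at least one DMA frame long
 * and the entry is word aligned (SG_AA) and block aligned (SG_SA). Anything
 * else goes through omap_sham_update_dma_slow(), which first copies into the
 * aligned context buffer.
 */
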
static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        unsigned int length, final, tail;
        struct scatterlist *sg;
        int ret;

        if (!ctx->total)
                return 0;

        if (ctx->bufcnt || ctx->offset)
                return omap_sham_update_dma_slow(dd);

        /*
         * Don't use the sg interface when the transfer size is less
         * than the number of elements in a DMA frame.  Otherwise,
         * the dmaengine infrastructure will calculate that it needs
         * to transfer 0 frames which ultimately fails.
         */
        if (ctx->total < (DST_MAXBURST * sizeof(u32)))
                return omap_sham_update_dma_slow(dd);

        dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
                ctx->digcnt, ctx->bufcnt, ctx->total);

        sg = ctx->sg;

        if (!SG_AA(sg))
                return omap_sham_update_dma_slow(dd);

        if (!sg_is_last(sg) && !SG_SA(sg))
                /* size is not SHA1_BLOCK_SIZE aligned */
                return omap_sham_update_dma_slow(dd);

        length = min(ctx->total, sg->length);

        if (sg_is_last(sg)) {
                if (!(ctx->flags & BIT(FLAGS_FINUP))) {
                        /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
                        tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
                        /* without finup() we need one block to close hash */
                        if (!tail)
                                tail = SHA1_MD5_BLOCK_SIZE;
                        length -= tail;
                }
        }

        if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
                dev_err(dd->dev, "dma_map_sg error\n");
                return -EINVAL;
        }

        ctx->flags |= BIT(FLAGS_SG);

        ctx->total -= length;
        ctx->offset = length; /* offset where to start slow */

        final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

        ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
        if (ret != -EINPROGRESS)
                dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);

        return ret;
}

static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        int bufcnt;

        omap_sham_append_sg(ctx);
        bufcnt = ctx->bufcnt;
        ctx->bufcnt = 0;

        return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

        dmaengine_terminate_all(dd->dma_lch);

        if (ctx->flags & BIT(FLAGS_SG)) {
                dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
                if (ctx->sg->length == ctx->offset) {
                        ctx->sg = sg_next(ctx->sg);
                        if (ctx->sg)
                                ctx->offset = 0;
                }
        } else {
                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
                                 DMA_TO_DEVICE);
        }

        return 0;
}

static int omap_sham_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        struct omap_sham_dev *dd = NULL, *tmp;

        spin_lock_bh(&sham.lock);
        if (!tctx->dd) {
                list_for_each_entry(tmp, &sham.dev_list, list) {
                        dd = tmp;
                        break;
                }
                tctx->dd = dd;
        } else {
                dd = tctx->dd;
        }
        spin_unlock_bh(&sham.lock);

        ctx->dd = dd;

        ctx->flags = 0;

        dev_dbg(dd->dev, "init: digest size: %d\n",
                crypto_ahash_digestsize(tfm));

        switch (crypto_ahash_digestsize(tfm)) {
        case MD5_DIGEST_SIZE:
                ctx->flags |= FLAGS_MODE_MD5;
                break;
        case SHA1_DIGEST_SIZE:
                ctx->flags |= FLAGS_MODE_SHA1;
                break;
        case SHA224_DIGEST_SIZE:
                ctx->flags |= FLAGS_MODE_SHA224;
                break;
        case SHA256_DIGEST_SIZE:
                ctx->flags |= FLAGS_MODE_SHA256;
                break;
        }

        ctx->bufcnt = 0;
        ctx->digcnt = 0;
        ctx->buflen = BUFLEN;

        if (tctx->flags & BIT(FLAGS_HMAC)) {
                if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
                        struct omap_sham_hmac_ctx *bctx = tctx->base;

                        memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
                        ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
                }

                ctx->flags |= BIT(FLAGS_HMAC);
        }

        return 0;
}

*dd
)
832 struct ahash_request
*req
= dd
->req
;
833 struct omap_sham_reqctx
*ctx
= ahash_request_ctx(req
);
836 dev_dbg(dd
->dev
, "update_req: total: %u, digcnt: %d, finup: %d\n",
837 ctx
->total
, ctx
->digcnt
, (ctx
->flags
& BIT(FLAGS_FINUP
)) != 0);
839 if (ctx
->flags
& BIT(FLAGS_CPU
))
840 err
= omap_sham_update_cpu(dd
);
842 err
= omap_sham_update_dma_start(dd
);
844 /* wait for dma completion before can take more data */
845 dev_dbg(dd
->dev
, "update: err: %d, digcnt: %d\n", err
, ctx
->digcnt
);
850 static int omap_sham_final_req(struct omap_sham_dev
*dd
)
852 struct ahash_request
*req
= dd
->req
;
853 struct omap_sham_reqctx
*ctx
= ahash_request_ctx(req
);
854 int err
= 0, use_dma
= 1;
856 if (ctx
->bufcnt
<= DMA_MIN
)
857 /* faster to handle last block with cpu */
861 err
= omap_sham_xmit_dma_map(dd
, ctx
, ctx
->bufcnt
, 1);
863 err
= omap_sham_xmit_cpu(dd
, ctx
->buffer
, ctx
->bufcnt
, 1);
867 dev_dbg(dd
->dev
, "final_req: err: %d\n", err
);
static int omap_sham_finish_hmac(struct ahash_request *req)
{
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct omap_sham_hmac_ctx *bctx = tctx->base;
        int bs = crypto_shash_blocksize(bctx->shash);
        int ds = crypto_shash_digestsize(bctx->shash);
        struct {
                struct shash_desc shash;
                char ctx[crypto_shash_descsize(bctx->shash)];
        } desc;

        desc.shash.tfm = bctx->shash;
        desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

        return crypto_shash_init(&desc.shash) ?:
               crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
               crypto_shash_finup(&desc.shash, req->result, ds, req->result);
}

static int omap_sham_finish(struct ahash_request *req)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        struct omap_sham_dev *dd = ctx->dd;
        int err = 0;

        if (ctx->digcnt) {
                omap_sham_copy_ready_hash(req);
                if ((ctx->flags & BIT(FLAGS_HMAC)) &&
                    !test_bit(FLAGS_AUTO_XOR, &dd->flags))
                        err = omap_sham_finish_hmac(req);
        }

        dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);

        return err;
}

static void omap_sham_finish_req(struct ahash_request *req, int err)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        struct omap_sham_dev *dd = ctx->dd;

        if (!err) {
                dd->pdata->copy_hash(req, 1);
                if (test_bit(FLAGS_FINAL, &dd->flags))
                        err = omap_sham_finish(req);
        } else {
                ctx->flags |= BIT(FLAGS_ERROR);
        }

        /* atomic operation is not needed here */
        dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
                       BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

        pm_runtime_put_sync(dd->dev);

        if (req->base.complete)
                req->base.complete(&req->base, err);

        /* handle new request */
        tasklet_schedule(&dd->done_task);
}

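/*
 * omap_sham_handle_queue() owns the FLAGS_BUSY bit: under dd->lock it
 * enqueues the new request, dequeues the next one (completing a backlogged
 * request with -EINPROGRESS), then restores the intermediate digest if the
 * request changed and dispatches an update or final step. Results other
 * than -EINPROGRESS are finished synchronously here.
 */
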
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
                                  struct ahash_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct omap_sham_reqctx *ctx;
        unsigned long flags;
        int err = 0, ret = 0;

        spin_lock_irqsave(&dd->lock, flags);
        if (req)
                ret = ahash_enqueue_request(&dd->queue, req);
        if (test_bit(FLAGS_BUSY, &dd->flags)) {
                spin_unlock_irqrestore(&dd->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req)
                set_bit(FLAGS_BUSY, &dd->flags);
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        dd->req = req;
        ctx = ahash_request_ctx(req);

        dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
                ctx->op, req->nbytes);

        err = omap_sham_hw_init(dd);
        if (err)
                goto err1;

        if (ctx->digcnt)
                /* request has changed - restore hash */
                dd->pdata->copy_hash(req, 0);

        if (ctx->op == OP_UPDATE) {
                err = omap_sham_update_req(dd);
                if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
                        /* no final() after finup() */
                        err = omap_sham_final_req(dd);
        } else if (ctx->op == OP_FINAL) {
                err = omap_sham_final_req(dd);
        }
err1:
        if (err != -EINPROGRESS)
                /* done_task will not finish it, so do it here */
                omap_sham_finish_req(req, err);

        dev_dbg(dd->dev, "exit, err: %d\n", err);

        return ret;
}

static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct omap_sham_dev *dd = tctx->dd;

        ctx->op = op;

        return omap_sham_handle_queue(dd, req);
}

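/*
 * update() buffering policy: data below the 9-byte hardware minimum, or data
 * that still fits in the context buffer, is only appended and hashed later;
 * short finup() transfers of up to one block are marked for the CPU path;
 * everything else is queued for the device.
 */
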
static int omap_sham_update(struct ahash_request *req)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

        if (!req->nbytes)
                return 0;

        ctx->total = req->nbytes;
        ctx->sg = req->src;
        ctx->offset = 0;

        if (ctx->flags & BIT(FLAGS_FINUP)) {
                if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
                        /*
                         * OMAP HW accel works only with buffers >= 9;
                         * will switch to bypass in final();
                         * final has the same request and data
                         */
                        omap_sham_append_sg(ctx);
                        return 0;
                } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
                        /*
                         * faster to use CPU for short transfers
                         */
                        ctx->flags |= BIT(FLAGS_CPU);
                }
        } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
                omap_sham_append_sg(ctx);
                return 0;
        }

        return omap_sham_enqueue(req, OP_UPDATE);
}

static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
                                  const u8 *data, unsigned int len, u8 *out)
{
        struct {
                struct shash_desc shash;
                char ctx[crypto_shash_descsize(shash)];
        } desc;

        desc.shash.tfm = shash;
        desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_shash_digest(&desc.shash, data, len, out);
}

static int omap_sham_final_shash(struct ahash_request *req)
{
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

        return omap_sham_shash_digest(tctx->fallback, req->base.flags,
                                      ctx->buffer, ctx->bufcnt, req->result);
}

static int omap_sham_final(struct ahash_request *req)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

        ctx->flags |= BIT(FLAGS_FINUP);

        if (ctx->flags & BIT(FLAGS_ERROR))
                return 0; /* uncompleted hash is not needed */

        /* OMAP HW accel works only with buffers >= 9 */
        /* HMAC is always >= 9 because ipad == block size */
        if ((ctx->digcnt + ctx->bufcnt) < 9)
                return omap_sham_final_shash(req);
        else if (ctx->bufcnt)
                return omap_sham_enqueue(req, OP_FINAL);

        /* copy ready hash (+ finalize hmac) */
        return omap_sham_finish(req);
}

static int omap_sham_finup(struct ahash_request *req)
{
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        int err1, err2;

        ctx->flags |= BIT(FLAGS_FINUP);

        err1 = omap_sham_update(req);
        if (err1 == -EINPROGRESS || err1 == -EBUSY)
                return err1;
        /*
         * final() has to be always called to cleanup resources
         * even if update() failed, except EINPROGRESS
         */
        err2 = omap_sham_final(req);

        return err1 ?: err2;
}

static int omap_sham_digest(struct ahash_request *req)
{
        return omap_sham_init(req) ?: omap_sham_finup(req);
}

static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
        struct omap_sham_hmac_ctx *bctx = tctx->base;
        int bs = crypto_shash_blocksize(bctx->shash);
        int ds = crypto_shash_digestsize(bctx->shash);
        struct omap_sham_dev *dd = NULL, *tmp;
        int err, i;

        spin_lock_bh(&sham.lock);
        if (!tctx->dd) {
                list_for_each_entry(tmp, &sham.dev_list, list) {
                        dd = tmp;
                        break;
                }
                tctx->dd = dd;
        } else {
                dd = tctx->dd;
        }
        spin_unlock_bh(&sham.lock);

        err = crypto_shash_setkey(tctx->fallback, key, keylen);
        if (err)
                return err;

        if (keylen > bs) {
                err = omap_sham_shash_digest(bctx->shash,
                                crypto_shash_get_flags(bctx->shash),
                                key, keylen, bctx->ipad);
                if (err)
                        return err;
                keylen = ds;
        } else {
                memcpy(bctx->ipad, key, keylen);
        }

        memset(bctx->ipad + keylen, 0, bs - keylen);

        if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
                memcpy(bctx->opad, bctx->ipad, bs);

                for (i = 0; i < bs; i++) {
                        bctx->ipad[i] ^= 0x36;
                        bctx->opad[i] ^= 0x5c;
                }
        }

        return err;
}

static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
        const char *alg_name = crypto_tfm_alg_name(tfm);

        /* Allocate a fallback and abort if it failed. */
        tctx->fallback = crypto_alloc_shash(alg_name, 0,
                                            CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(tctx->fallback)) {
                pr_err("omap-sham: fallback driver '%s' could not be loaded.\n",
                       alg_name);
                return PTR_ERR(tctx->fallback);
        }

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct omap_sham_reqctx) + BUFLEN);

        if (alg_base) {
                struct omap_sham_hmac_ctx *bctx = tctx->base;
                tctx->flags |= BIT(FLAGS_HMAC);
                bctx->shash = crypto_alloc_shash(alg_base, 0,
                                                 CRYPTO_ALG_NEED_FALLBACK);
                if (IS_ERR(bctx->shash)) {
                        pr_err("omap-sham: base driver '%s' could not be loaded.\n",
                               alg_base);
                        crypto_free_shash(tctx->fallback);
                        return PTR_ERR(bctx->shash);
                }
        }

        return 0;
}

static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
        return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
        return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
{
        return omap_sham_cra_init_alg(tfm, "sha224");
}

static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
{
        return omap_sham_cra_init_alg(tfm, "sha256");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
        return omap_sham_cra_init_alg(tfm, "md5");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(tctx->fallback);
        tctx->fallback = NULL;

        if (tctx->flags & BIT(FLAGS_HMAC)) {
                struct omap_sham_hmac_ctx *bctx = tctx->base;
                crypto_free_shash(bctx->shash);
        }
}

static struct ahash_alg algs_sha1_md5[] = {
{
        .init		= omap_sham_init,
        .update		= omap_sham_update,
        .final		= omap_sham_final,
        .finup		= omap_sham_finup,
        .digest		= omap_sham_digest,
        .halg.digestsize	= SHA1_DIGEST_SIZE,
        .halg.base	= {
                .cra_name		= "sha1",
                .cra_driver_name	= "omap-sha1",
                .cra_priority		= 100,
                .cra_flags		= CRYPTO_ALG_TYPE_AHASH |
                                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                                          CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize		= SHA1_BLOCK_SIZE,
                .cra_ctxsize		= sizeof(struct omap_sham_ctx),
                .cra_module		= THIS_MODULE,
                .cra_init		= omap_sham_cra_init,
                .cra_exit		= omap_sham_cra_exit,
        }
},
{
        .init		= omap_sham_init,
        .update		= omap_sham_update,
        .final		= omap_sham_final,
        .finup		= omap_sham_finup,
        .digest		= omap_sham_digest,
        .halg.digestsize	= MD5_DIGEST_SIZE,
        .halg.base	= {
                .cra_name		= "md5",
                .cra_driver_name	= "omap-md5",
                .cra_priority		= 100,
                .cra_flags		= CRYPTO_ALG_TYPE_AHASH |
                                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                                          CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize		= SHA1_BLOCK_SIZE,
                .cra_ctxsize		= sizeof(struct omap_sham_ctx),
                .cra_alignmask		= OMAP_ALIGN_MASK,
                .cra_module		= THIS_MODULE,
                .cra_init		= omap_sham_cra_init,
                .cra_exit		= omap_sham_cra_exit,
        }
},
{
        .init		= omap_sham_init,
        .update		= omap_sham_update,
        .final		= omap_sham_final,
        .finup		= omap_sham_finup,
        .digest		= omap_sham_digest,
        .setkey		= omap_sham_setkey,
        .halg.digestsize	= SHA1_DIGEST_SIZE,
        .halg.base	= {
                .cra_name		= "hmac(sha1)",
                .cra_driver_name	= "omap-hmac-sha1",
                .cra_priority		= 100,
                .cra_flags		= CRYPTO_ALG_TYPE_AHASH |
                                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                                          CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize		= SHA1_BLOCK_SIZE,
                .cra_ctxsize		= sizeof(struct omap_sham_ctx) +
                                          sizeof(struct omap_sham_hmac_ctx),
                .cra_alignmask		= OMAP_ALIGN_MASK,
                .cra_module		= THIS_MODULE,
                .cra_init		= omap_sham_cra_sha1_init,
                .cra_exit		= omap_sham_cra_exit,
        }
},
{
        .init		= omap_sham_init,
        .update		= omap_sham_update,
        .final		= omap_sham_final,
        .finup		= omap_sham_finup,
        .digest		= omap_sham_digest,
        .setkey		= omap_sham_setkey,
        .halg.digestsize	= MD5_DIGEST_SIZE,
        .halg.base	= {
                .cra_name		= "hmac(md5)",
                .cra_driver_name	= "omap-hmac-md5",
                .cra_priority		= 100,
                .cra_flags		= CRYPTO_ALG_TYPE_AHASH |
                                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                                          CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize		= SHA1_BLOCK_SIZE,
                .cra_ctxsize		= sizeof(struct omap_sham_ctx) +
                                          sizeof(struct omap_sham_hmac_ctx),
                .cra_alignmask		= OMAP_ALIGN_MASK,
                .cra_module		= THIS_MODULE,
                .cra_init		= omap_sham_cra_md5_init,
                .cra_exit		= omap_sham_cra_exit,
        }
},
};

/* OMAP4 has some algs in addition to what OMAP2 has */
static struct ahash_alg algs_sha224_sha256[] = {
{
        .init		= omap_sham_init,
        .update		= omap_sham_update,
        .final		= omap_sham_final,
        .finup		= omap_sham_finup,
        .digest		= omap_sham_digest,
        .halg.digestsize	= SHA224_DIGEST_SIZE,
        .halg.base	= {
                .cra_name		= "sha224",
                .cra_driver_name	= "omap-sha224",
                .cra_priority		= 100,
                .cra_flags		= CRYPTO_ALG_TYPE_AHASH |
                                          CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize		= SHA224_BLOCK_SIZE,
                .cra_ctxsize		= sizeof(struct omap_sham_ctx),
                .cra_module		= THIS_MODULE,
                .cra_init		= omap_sham_cra_init,
                .cra_exit		= omap_sham_cra_exit,
        }
},
{
        .init		= omap_sham_init,
        .update		= omap_sham_update,
        .final		= omap_sham_final,
        .finup		= omap_sham_finup,
        .digest		= omap_sham_digest,
        .halg.digestsize	= SHA256_DIGEST_SIZE,
        .halg.base	= {
                .cra_name		= "sha256",
                .cra_driver_name	= "omap-sha256",
                .cra_priority		= 100,
                .cra_flags		= CRYPTO_ALG_TYPE_AHASH |
                                          CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize		= SHA256_BLOCK_SIZE,
                .cra_ctxsize		= sizeof(struct omap_sham_ctx),
                .cra_module		= THIS_MODULE,
                .cra_init		= omap_sham_cra_init,
                .cra_exit		= omap_sham_cra_exit,
        }
},
{
        .init		= omap_sham_init,
        .update		= omap_sham_update,
        .final		= omap_sham_final,
        .finup		= omap_sham_finup,
        .digest		= omap_sham_digest,
        .setkey		= omap_sham_setkey,
        .halg.digestsize	= SHA224_DIGEST_SIZE,
        .halg.base	= {
                .cra_name		= "hmac(sha224)",
                .cra_driver_name	= "omap-hmac-sha224",
                .cra_priority		= 100,
                .cra_flags		= CRYPTO_ALG_TYPE_AHASH |
                                          CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize		= SHA224_BLOCK_SIZE,
                .cra_ctxsize		= sizeof(struct omap_sham_ctx) +
                                          sizeof(struct omap_sham_hmac_ctx),
                .cra_alignmask		= OMAP_ALIGN_MASK,
                .cra_module		= THIS_MODULE,
                .cra_init		= omap_sham_cra_sha224_init,
                .cra_exit		= omap_sham_cra_exit,
        }
},
{
        .init		= omap_sham_init,
        .update		= omap_sham_update,
        .final		= omap_sham_final,
        .finup		= omap_sham_finup,
        .digest		= omap_sham_digest,
        .setkey		= omap_sham_setkey,
        .halg.digestsize	= SHA256_DIGEST_SIZE,
        .halg.base	= {
                .cra_name		= "hmac(sha256)",
                .cra_driver_name	= "omap-hmac-sha256",
                .cra_priority		= 100,
                .cra_flags		= CRYPTO_ALG_TYPE_AHASH |
                                          CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize		= SHA256_BLOCK_SIZE,
                .cra_ctxsize		= sizeof(struct omap_sham_ctx) +
                                          sizeof(struct omap_sham_hmac_ctx),
                .cra_alignmask		= OMAP_ALIGN_MASK,
                .cra_module		= THIS_MODULE,
                .cra_init		= omap_sham_cra_sha256_init,
                .cra_exit		= omap_sham_cra_exit,
        }
},
};

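/*
 * These transforms are reached through the generic ahash API rather than by
 * calling into this file. A minimal sketch of a kernel user follows; "done"
 * and "priv" are illustrative names, not part of this driver:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, done, priv);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);    (may return -EINPROGRESS)
 *
 * "omap-sha256" can also be requested explicitly by driver name; the crypto
 * core otherwise selects between implementations by cra_priority.
 */
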
static void omap_sham_done_task(unsigned long data)
{
        struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
        int err = 0;

        if (!test_bit(FLAGS_BUSY, &dd->flags)) {
                omap_sham_handle_queue(dd, NULL);
                return;
        }

        if (test_bit(FLAGS_CPU, &dd->flags)) {
                if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
                        goto finish;
        } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
                if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
                        omap_sham_update_dma_stop(dd);
                        if (dd->err) {
                                err = dd->err;
                                goto finish;
                        }
                }
                if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
                        /* hash or semi-hash ready */
                        clear_bit(FLAGS_DMA_READY, &dd->flags);
                        err = omap_sham_update_dma_start(dd);
                        if (err != -EINPROGRESS)
                                goto finish;
                }
        }

        return;

finish:
        dev_dbg(dd->dev, "update done: err: %d\n", err);
        /* finish current request */
        omap_sham_finish_req(dd->req, err);
}

static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
        if (!test_bit(FLAGS_BUSY, &dd->flags)) {
                dev_warn(dd->dev, "Interrupt when no active requests.\n");
        } else {
                set_bit(FLAGS_OUTPUT_READY, &dd->flags);
                tasklet_schedule(&dd->done_task);
        }

        return IRQ_HANDLED;
}

static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
{
        struct omap_sham_dev *dd = dev_id;

        if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
                /* final -> allow device to go to power-saving mode */
                omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

        omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
                             SHA_REG_CTRL_OUTPUT_READY);
        omap_sham_read(dd, SHA_REG_CTRL);

        return omap_sham_irq_common(dd);
}

static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
{
        struct omap_sham_dev *dd = dev_id;

        omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);

        return omap_sham_irq_common(dd);
}

static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
        {
                .algs_list	= algs_sha1_md5,
                .size		= ARRAY_SIZE(algs_sha1_md5),
        },
};

static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
        .algs_info	= omap_sham_algs_info_omap2,
        .algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
        .flags		= BIT(FLAGS_BE32_SHA1),
        .digest_size	= SHA1_DIGEST_SIZE,
        .copy_hash	= omap_sham_copy_hash_omap2,
        .write_ctrl	= omap_sham_write_ctrl_omap2,
        .trigger	= omap_sham_trigger_omap2,
        .poll_irq	= omap_sham_poll_irq_omap2,
        .intr_hdlr	= omap_sham_irq_omap2,
        .idigest_ofs	= 0x00,
        .din_ofs	= 0x1c,
        .digcnt_ofs	= 0x14,
        .rev_ofs	= 0x5c,
        .mask_ofs	= 0x60,
        .sysstatus_ofs	= 0x64,
        .major_mask	= 0xf0,
        .major_shift	= 4,
        .minor_mask	= 0x0f,
        .minor_shift	= 0,
};

static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
        {
                .algs_list	= algs_sha1_md5,
                .size		= ARRAY_SIZE(algs_sha1_md5),
        },
        {
                .algs_list	= algs_sha224_sha256,
                .size		= ARRAY_SIZE(algs_sha224_sha256),
        },
};

static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
        .algs_info	= omap_sham_algs_info_omap4,
        .algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
        .flags		= BIT(FLAGS_AUTO_XOR),
        .digest_size	= SHA256_DIGEST_SIZE,
        .copy_hash	= omap_sham_copy_hash_omap4,
        .write_ctrl	= omap_sham_write_ctrl_omap4,
        .trigger	= omap_sham_trigger_omap4,
        .poll_irq	= omap_sham_poll_irq_omap4,
        .intr_hdlr	= omap_sham_irq_omap4,
        .idigest_ofs	= 0x020,
        .din_ofs	= 0x080,
        .digcnt_ofs	= 0x040,
        .rev_ofs	= 0x100,
        .mask_ofs	= 0x110,
        .sysstatus_ofs	= 0x114,
        .major_mask	= 0x0700,
        .major_shift	= 8,
        .minor_mask	= 0x003f,
        .minor_shift	= 0,
};

#ifdef CONFIG_OF
static const struct of_device_id omap_sham_of_match[] = {
        {
                .compatible	= "ti,omap2-sham",
                .data		= &omap_sham_pdata_omap2,
        },
        {
                .compatible	= "ti,omap4-sham",
                .data		= &omap_sham_pdata_omap4,
        },
        {},
};
MODULE_DEVICE_TABLE(of, omap_sham_of_match);

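/*
 * Illustrative device tree node only; the unit address, register size and
 * interrupt number below are placeholders, not taken from a specific SoC:
 *
 *	sham: sham@4b100000 {
 *		compatible = "ti,omap4-sham";
 *		reg = <0x4b100000 0x300>;
 *		interrupts = <51>;
 *	};
 */
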
static int omap_sham_get_res_of(struct omap_sham_dev *dd,
                struct device *dev, struct resource *res)
{
        struct device_node *node = dev->of_node;
        const struct of_device_id *match;
        int err;

        match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
        if (!match) {
                dev_err(dev, "no compatible OF match\n");
                return -EINVAL;
        }

        err = of_address_to_resource(node, 0, res);
        if (err < 0) {
                dev_err(dev, "can't translate OF node address\n");
                return -EINVAL;
        }

        dd->irq = of_irq_to_resource(node, 0, NULL);
        if (!dd->irq) {
                dev_err(dev, "can't translate OF irq value\n");
                return -EINVAL;
        }

        dd->dma = -1; /* Dummy value that's unused */
        dd->pdata = match->data;

        return 0;
}

#else
static const struct of_device_id omap_sham_of_match[] = {};

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
                struct device *dev, struct resource *res)
{
        return -EINVAL;
}
#endif

static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
                struct platform_device *pdev, struct resource *res)
{
        struct device *dev = &pdev->dev;
        struct resource *r;

        /* Get the base address */
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(dev, "no MEM resource info\n");
                return -ENODEV;
        }
        memcpy(res, r, sizeof(*res));

        /* Get the IRQ */
        dd->irq = platform_get_irq(pdev, 0);
        if (dd->irq < 0) {
                dev_err(dev, "no IRQ resource info\n");
                return dd->irq;
        }

        /* Get the DMA */
        r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (!r) {
                dev_err(dev, "no DMA resource info\n");
                return -ENODEV;
        }
        dd->dma = r->start;

        /* Only OMAP2/3 can be non-DT */
        dd->pdata = &omap_sham_pdata_omap2;

        return 0;
}

static int omap_sham_probe(struct platform_device *pdev)
{
        struct omap_sham_dev *dd;
        struct device *dev = &pdev->dev;
        struct resource res;
        dma_cap_mask_t mask;
        int err, i, j;
        u32 rev;

        dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
        if (dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                err = -ENOMEM;
                goto data_err;
        }
        dd->dev = dev;
        platform_set_drvdata(pdev, dd);

        INIT_LIST_HEAD(&dd->list);
        spin_lock_init(&dd->lock);
        tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
        crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

        err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
                               omap_sham_get_res_pdev(dd, pdev, &res);
        if (err)
                goto res_err;

        dd->io_base = devm_request_and_ioremap(dev, &res);
        if (!dd->io_base) {
                dev_err(dev, "can't ioremap\n");
                err = -ENOMEM;
                goto res_err;
        }
        dd->phys_base = res.start;

        err = request_irq(dd->irq, dd->pdata->intr_hdlr, IRQF_TRIGGER_LOW,
                          dev_name(dev), dd);
        if (err) {
                dev_err(dev, "unable to request irq.\n");
                goto res_err;
        }

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
                                                       &dd->dma, dev, "rx");
        if (!dd->dma_lch) {
                dev_err(dev, "unable to obtain RX DMA engine channel %u\n",
                        dd->dma);
                err = -ENXIO;
                goto dma_err;
        }

        dd->flags |= dd->pdata->flags;

        pm_runtime_enable(dev);
        pm_runtime_get_sync(dev);
        rev = omap_sham_read(dd, SHA_REG_REV(dd));
        pm_runtime_put_sync(&pdev->dev);

        dev_info(dev, "hw accel on OMAP rev %u.%u\n",
                 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
                 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

        spin_lock(&sham.lock);
        list_add_tail(&dd->list, &sham.dev_list);
        spin_unlock(&sham.lock);

        for (i = 0; i < dd->pdata->algs_info_size; i++) {
                for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
                        err = crypto_register_ahash(
                                        &dd->pdata->algs_info[i].algs_list[j]);
                        if (err)
                                goto err_algs;

                        dd->pdata->algs_info[i].registered++;
                }
        }

        return 0;

err_algs:
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
                        crypto_unregister_ahash(
                                        &dd->pdata->algs_info[i].algs_list[j]);
        pm_runtime_disable(dev);
        dma_release_channel(dd->dma_lch);
dma_err:
        free_irq(dd->irq, dd);
res_err:
        kfree(dd);
        dd = NULL;
data_err:
        dev_err(dev, "initialization failed.\n");

        return err;
}

static int omap_sham_remove(struct platform_device *pdev)
{
        static struct omap_sham_dev *dd;
        int i, j;

        dd = platform_get_drvdata(pdev);
        if (!dd)
                return -ENODEV;
        spin_lock(&sham.lock);
        list_del(&dd->list);
        spin_unlock(&sham.lock);
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
                        crypto_unregister_ahash(
                                        &dd->pdata->algs_info[i].algs_list[j]);
        tasklet_kill(&dd->done_task);
        pm_runtime_disable(&pdev->dev);
        dma_release_channel(dd->dma_lch);
        free_irq(dd->irq, dd);
        kfree(dd);
        dd = NULL;

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_sham_suspend(struct device *dev)
{
        pm_runtime_put_sync(dev);
        return 0;
}

static int omap_sham_resume(struct device *dev)
{
        pm_runtime_get_sync(dev);
        return 0;
}
#endif

static const struct dev_pm_ops omap_sham_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume)
};

static struct platform_driver omap_sham_driver = {
        .probe	= omap_sham_probe,
        .remove	= omap_sham_remove,
        .driver	= {
                .name	= "omap-sham",
                .owner	= THIS_MODULE,
                .pm	= &omap_sham_pm_ops,
                .of_match_table	= omap_sham_of_match,
        },
};

static int __init omap_sham_mod_init(void)
{
        return platform_driver_register(&omap_sham_driver);
}

static void __exit omap_sham_mod_exit(void)
{
        platform_driver_unregister(&omap_sham_driver);
}

module_init(omap_sham_mod_init);
module_exit(omap_sham_mod_exit);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");