/*
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
#define ATMEL_AES_PRIORITY	300

#define ATMEL_AES_BUFFER_ORDER	2
#define ATMEL_AES_BUFFER_SIZE	(PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
#define CFB64_BLOCK_SIZE	8

#define SIZE_IN_WORDS(x)	((x) >> 2)

/* AES flags */
/* Reserve bits [18:16] [14:12] [0] for mode (same as for AES_MR) */
#define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
#define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
#define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
#define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
#define AES_FLAGS_OFB		AES_MR_OPMOD_OFB
#define AES_FLAGS_CFB128	(AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
#define AES_FLAGS_CFB64		(AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
#define AES_FLAGS_CFB32		(AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
#define AES_FLAGS_CFB16		(AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
#define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR		AES_MR_OPMOD_CTR

#define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
				 AES_FLAGS_ENCRYPT)

#define AES_FLAGS_INIT		BIT(2)
#define AES_FLAGS_BUSY		BIT(3)

#define AES_FLAGS_PERSISTENT	(AES_FLAGS_INIT | AES_FLAGS_BUSY)

#define ATMEL_AES_QUEUE_LENGTH	50

#define ATMEL_AES_DMA_THRESHOLD	16
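
/*
 * Note: the AES_FLAGS_* mode values deliberately reuse the AES_MR bit
 * layout (see the "Reserve bits" comment above), so atmel_aes_write_ctrl()
 * can OR (dd->flags & AES_FLAGS_MODE_MASK) straight into the Mode Register.
 * Only AES_FLAGS_INIT and AES_FLAGS_BUSY are pure software state.
 */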
struct atmel_aes_caps {
	bool	has_dualbuff;
	bool	has_cfb64;
	u32	max_burst_size;
};

struct atmel_aes_dev;

typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);

struct atmel_aes_base_ctx {
	struct atmel_aes_dev	*dd;
	atmel_aes_fn_t		start;

	int			keylen;
	u32			key[AES_KEYSIZE_256 / sizeof(u32)];

	u16			block_size;
};

struct atmel_aes_ctx {
	struct atmel_aes_base_ctx	base;
};

struct atmel_aes_reqctx {
	unsigned long	mode;
};

struct atmel_aes_dma {
	struct dma_chan		*chan;
	struct scatterlist	*sg;
	int			nents;
	unsigned int		remainder;
	unsigned int		sg_len;
};

struct atmel_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct crypto_async_request	*areq;
	struct atmel_aes_base_ctx	*ctx;

	bool			is_async;
	atmel_aes_fn_t		resume;
	atmel_aes_fn_t		cpu_transfer_complete;

	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	size_t			total;
	size_t			datalen;
	u32			*data;

	struct atmel_aes_dma	src;
	struct atmel_aes_dma	dst;

	size_t			buflen;
	void			*buf;
	struct scatterlist	aligned_sg;
	struct scatterlist	*real_dst;

	struct atmel_aes_caps	caps;

	u32			hw_version;
};
struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};
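
/*
 * All probed AES instances are kept on atmel_aes.dev_list;
 * atmel_aes_find_dev() binds a crypto context to one device on first use
 * and keeps using it for the lifetime of that context.
 */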
static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
				   u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
			     u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
			      const u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}

static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
					u32 *value)
{
	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}

static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
					 const u32 *value)
{
	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}
static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
						atmel_aes_fn_t resume)
{
	u32 isr = atmel_aes_read(dd, AES_ISR);

	if (unlikely(isr & AES_INT_DATARDY))
		return resume(dd);

	dd->resume = resume;
	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	return -EINPROGRESS;
}
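
/*
 * The transfer code is structured as a resume-driven state machine: when
 * the hardware is not ready, the current step stores a callback in
 * dd->resume, unmasks the DATARDY interrupt and returns -EINPROGRESS;
 * the interrupt handler later schedules done_task, which calls
 * dd->resume() to continue where the transfer left off.
 */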
static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
{
	len &= block_size - 1;
	return len ? block_size - len : 0;
}
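
/*
 * Worked example: atmel_aes_padlen(20, 16) == 12, i.e. a 20-byte request
 * must be padded by 12 bytes to reach the next 16-byte block boundary;
 * already-aligned lengths need no padding (atmel_aes_padlen(32, 16) == 0).
 */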
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd = NULL;
	struct atmel_aes_dev *tmp;

	spin_lock_bh(&atmel_aes.lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			aes_dd = tmp;
			break;
		}
		ctx->dd = aes_dd;
	} else {
		aes_dd = ctx->dd;
	}
	spin_unlock_bh(&atmel_aes.lock);

	return aes_dd;
}
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
	}

	return 0;
}
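
/*
 * The 0xE written to the AES_MR CKEY field above is, going by Atmel's
 * datasheet, the "countermeasure key" that must be supplied to make
 * Mode Register countermeasure configuration writes take effect; treat
 * that interpretation as an assumption from the documentation rather
 * than something this code itself establishes.
 */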
static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}
static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	int err;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);
	return 0;
}
static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
				      const struct atmel_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
}
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
	clk_disable_unprepare(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	if (dd->is_async)
		dd->areq->complete(dd->areq, err);

	tasklet_schedule(&dd->queue_task);

	return err;
}
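
/*
 * Completion has two flavours: for asynchronously handled requests
 * (is_async), the crypto API callback is invoked here; for a request
 * that is still being started synchronously, the error code simply
 * propagates back up the call chain. Either way, queue_task is
 * scheduled so the next queued request can be started.
 */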
static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
{
	int err = 0;
	u32 isr;

	for (;;) {
		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		if (dd->datalen < AES_BLOCK_SIZE)
			break;

		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_cpu_transfer;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
				 dd->buf, dd->total))
		err = -EINVAL;

	if (err)
		return atmel_aes_complete(dd, err);

	return dd->cpu_transfer_complete(dd);
}
static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       size_t len,
			       atmel_aes_fn_t resume)
{
	size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);

	if (unlikely(len == 0))
		return -EINVAL;

	sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);

	dd->total = len;
	dd->real_dst = dst;
	dd->cpu_transfer_complete = resume;
	dd->datalen = len + padlen;
	dd->data = (u32 *)dd->buf;
	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
}
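
/*
 * This PIO path bounces the whole request through dd->buf, feeding and
 * draining the data registers one 128-bit block at a time. It is used
 * for small requests (below ATMEL_AES_DMA_THRESHOLD bytes) with the
 * standard 16-byte block size, where DMA setup would cost more than it
 * saves; see atmel_aes_start().
 */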
static void atmel_aes_dma_callback(void *data);
static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
				    struct scatterlist *sg,
				    size_t len,
				    struct atmel_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, dd->ctx->block_size))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, dd->ctx->block_size))
				return false;

			dma->nents = nents+1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
			return false;

		len -= sg->length;
	}

	return false;
}
static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}
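
/*
 * atmel_aes_check_aligned() trims the length of the last scatterlist
 * entry it uses down to the DMA transfer length and records the cut-off
 * bytes in dma->remainder; atmel_aes_restore_sg() puts that length back
 * once the transfer is unmapped, so the caller's scatterlist is returned
 * unmodified.
 */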
static int atmel_aes_map(struct atmel_aes_dev *dd,
			 struct scatterlist *src,
			 struct scatterlist *dst,
			 size_t len)
{
	bool src_aligned, dst_aligned;
	size_t padlen;

	dd->total = len;
	dd->src.sg = src;
	dd->dst.sg = dst;
	dd->real_dst = dst;

	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
	if (!src_aligned || !dst_aligned) {
		padlen = atmel_aes_padlen(len, dd->ctx->block_size);

		if (dd->buflen < len + padlen)
			return -ENOMEM;

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
			dd->src.sg = &dd->aligned_sg;
			dd->src.nents = 1;
			dd->src.remainder = 0;
		}

		if (!dst_aligned) {
			dd->dst.sg = &dd->aligned_sg;
			dd->dst.nents = 1;
			dd->dst.remainder = 0;
		}

		sg_init_table(&dd->aligned_sg, 1);
		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
	}

	if (dd->src.sg == dd->dst.sg) {
		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
					    DMA_BIDIRECTIONAL);
		dd->dst.sg_len = dd->src.sg_len;
		if (!dd->src.sg_len)
			return -EFAULT;
	} else {
		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
					    DMA_TO_DEVICE);
		if (!dd->src.sg_len)
			return -EFAULT;

		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
					    DMA_FROM_DEVICE);
		if (!dd->dst.sg_len) {
			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
				     DMA_TO_DEVICE);
			return -EFAULT;
		}
	}

	return 0;
}
static void atmel_aes_unmap(struct atmel_aes_dev *dd)
{
	if (dd->src.sg == dd->dst.sg) {
		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
			     DMA_BIDIRECTIONAL);

		if (dd->src.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->src);
	} else {
		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
			     DMA_FROM_DEVICE);

		if (dd->dst.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->dst);

		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
			     DMA_TO_DEVICE);

		if (dd->src.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->src);
	}

	/* If the destination was bounced, copy the result back to the
	 * caller's real scatterlist. */
	if (dd->dst.sg == &dd->aligned_sg)
		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
				    dd->buf, dd->total);
}
static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
					enum dma_slave_buswidth addr_width,
					enum dma_transfer_direction dir,
					u32 maxburst)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config;
	dma_async_tx_callback callback;
	struct atmel_aes_dma *dma;
	int err;

	memset(&config, 0, sizeof(config));
	config.direction = dir;
	config.src_addr_width = addr_width;
	config.dst_addr_width = addr_width;
	config.src_maxburst = maxburst;
	config.dst_maxburst = maxburst;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma = &dd->src;
		callback = NULL;
		config.dst_addr = dd->phys_base + AES_IDATAR(0);
		break;

	case DMA_DEV_TO_MEM:
		dma = &dd->dst;
		callback = atmel_aes_dma_callback;
		config.src_addr = dd->phys_base + AES_ODATAR(0);
		break;

	default:
		return -EINVAL;
	}

	err = dmaengine_slave_config(dma->chan, &config);
	if (err)
		return err;

	desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = callback;
	desc->callback_param = dd;
	dmaengine_submit(desc);
	dma_async_issue_pending(dma->chan);

	return 0;
}
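
/*
 * Only the device-to-memory (output) channel gets a completion callback:
 * the request is finished once all ciphertext has been drained from
 * ODATAR, which necessarily happens after the input channel is done
 * feeding IDATAR.
 */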
static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
					enum dma_transfer_direction dir)
{
	struct atmel_aes_dma *dma;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma = &dd->src;
		break;

	case DMA_DEV_TO_MEM:
		dma = &dd->dst;
		break;

	default:
		return;
	}

	dmaengine_terminate_all(dma->chan);
}

static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       size_t len,
			       atmel_aes_fn_t resume)
{
	enum dma_slave_buswidth addr_width;
	u32 maxburst;
	int err;

	switch (dd->ctx->block_size) {
	case CFB8_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		maxburst = 1;
		break;

	case CFB16_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		maxburst = 1;
		break;

	case CFB32_BLOCK_SIZE:
	case CFB64_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		maxburst = 1;
		break;

	case AES_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		maxburst = dd->caps.max_burst_size;
		break;

	default:
		err = -EINVAL;
		goto exit;
	}

	err = atmel_aes_map(dd, src, dst, len);
	if (err)
		goto exit;

	dd->resume = resume;

	/* Set output DMA transfer first */
	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
					   maxburst);
	if (err)
		goto unmap;

	/* Then set input DMA transfer */
	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
					   maxburst);
	if (err)
		goto output_transfer_stop;

	return -EINPROGRESS;

output_transfer_stop:
	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
unmap:
	atmel_aes_unmap(dd);
exit:
	return atmel_aes_complete(dd, err);
}
static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
{
	atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
	atmel_aes_unmap(dd);
}

static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	atmel_aes_dma_stop(dd);
	dd->is_async = true;
	(void)dd->resume(dd);
}
static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
				 const u32 *iv)
{
	u32 valmr = 0;

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	valmr |= dd->flags & AES_FLAGS_MODE_MASK;

	if (use_dma) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
			  dd->ctx->keylen >> 2);

	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
		atmel_aes_write_n(dd, AES_IVR(0), iv, 4);
}
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
				  struct crypto_async_request *new_areq)
{
	struct crypto_async_request *areq, *backlog;
	struct atmel_aes_base_ctx *ctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&dd->queue, new_areq);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	areq = crypto_dequeue_request(&dd->queue);
	if (areq)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	dd->areq = areq;
	dd->ctx = ctx;
	dd->is_async = (areq != new_areq);

	err = ctx->start(dd);
	return (dd->is_async) ? ret : err;
}
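
/*
 * Return-value convention: when the dequeued request is the one just
 * enqueued (synchronous start), the caller gets ctx->start()'s result
 * directly; when an older, backlogged request was started instead, the
 * enqueue status (-EINPROGRESS or -EBUSY) is returned and the started
 * request completes through the async callback path.
 */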
static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
{
	return atmel_aes_complete(dd, 0);
}

static int atmel_aes_start(struct atmel_aes_dev *dd)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
			dd->ctx->block_size != AES_BLOCK_SIZE);
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	atmel_aes_write_ctrl(dd, use_dma, req->info);
	if (use_dma)
		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
					   atmel_aes_transfer_complete);

	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
				   atmel_aes_transfer_complete);
}
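
/*
 * DMA is chosen for large requests and for all of the narrow CFB
 * variants (whose block size is not AES_BLOCK_SIZE); everything else
 * goes through the cheaper PIO path.
 */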
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
	dd->buflen = ATMEL_AES_BUFFER_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		return -ENOMEM;
	}

	return 0;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	/* The order must match the __get_free_pages() allocation above. */
	free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
}
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct atmel_aes_dev *dd;

	switch (mode & AES_FLAGS_OPMODE_MASK) {
	case AES_FLAGS_CFB8:
		ctx->block_size = CFB8_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB16:
		ctx->block_size = CFB16_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB32:
		ctx->block_size = CFB32_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB64:
		ctx->block_size = CFB64_BLOCK_SIZE;
		break;

	default:
		ctx->block_size = AES_BLOCK_SIZE;
		break;
	}

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return atmel_aes_handle_queue(dd, &req->base);
}
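
/*
 * The per-mode block size recorded here drives both the alignment checks
 * in atmel_aes_check_aligned() and the DMA bus width selected in
 * atmel_aes_dma_start().
 */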
static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave	*sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
			      struct crypto_platform_data *pdata)
{
	struct at_dma_slave *slave;
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Try to grab 2 DMA channels */
	slave = &pdata->dma_slave->rxdata;
	dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
							slave, dd->dev, "tx");
	if (!dd->src.chan)
		goto err_dma_in;

	slave = &pdata->dma_slave->txdata;
	dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
							slave, dd->dev, "rx");
	if (!dd->dst.chan)
		goto err_dma_out;

	return 0;

err_dma_out:
	dma_release_channel(dd->src.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dst.chan);
	dma_release_channel(dd->src.chan);
}
static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ECB);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
			AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
			AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
			AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
			AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
			AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
			AES_FLAGS_CTR);
}
static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
	ctx->base.start = atmel_aes_start;

	return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "atmel-ecb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ecb_encrypt,
		.decrypt	= atmel_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "atmel-cbc-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cbc_encrypt,
		.decrypt	= atmel_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ofb(aes)",
	.cra_driver_name	= "atmel-ofb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ofb_encrypt,
		.decrypt	= atmel_aes_ofb_decrypt,
	}
},
{
	.cra_name		= "cfb(aes)",
	.cra_driver_name	= "atmel-cfb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb_encrypt,
		.decrypt	= atmel_aes_cfb_decrypt,
	}
},
{
	.cra_name		= "cfb32(aes)",
	.cra_driver_name	= "atmel-cfb32-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB32_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x3,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb32_encrypt,
		.decrypt	= atmel_aes_cfb32_decrypt,
	}
},
{
	.cra_name		= "cfb16(aes)",
	.cra_driver_name	= "atmel-cfb16-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB16_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x1,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb16_encrypt,
		.decrypt	= atmel_aes_cfb16_decrypt,
	}
},
{
	.cra_name		= "cfb8(aes)",
	.cra_driver_name	= "atmel-cfb8-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB8_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb8_encrypt,
		.decrypt	= atmel_aes_cfb8_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "atmel-ctr-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ctr_encrypt,
		.decrypt	= atmel_aes_ctr_decrypt,
	}
},
};
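
/*
 * Each cra_alignmask mirrors the mode's block size minus one (0xf for
 * the 16-byte modes down to 0x0 for cfb8), so the crypto layer hands the
 * driver buffers it can feed to the 32-bit data registers without extra
 * copies in the common case.
 */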
static struct crypto_alg aes_cfb64_alg = {
	.cra_name		= "cfb64(aes)",
	.cra_driver_name	= "atmel-cfb64-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB64_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb64_encrypt,
		.decrypt	= atmel_aes_cfb64_decrypt,
	}
};
static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	dd->is_async = true;
	(void)dd->resume(dd);
}
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	return 0;

err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.max_burst_size = 1;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x500:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x200:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
			 "Unmanaged aes version, set minimum capabilities\n");
		break;
	}
}
#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		devm_kfree(&pdev->dev, pdata);
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	return ERR_PTR(-EINVAL);
}
#endif
static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmel_aes_of_init(pdev);
		if (IS_ERR(pdata)) {
			err = PTR_ERR(pdata);
			goto aes_dd_err;
		}
	}

	if (!pdata->dma_slave) {
		err = -ENXIO;
		goto aes_dd_err;
	}

	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
	if (aes_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto aes_dd_err;
	}

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);
	spin_lock_init(&aes_dd->lock);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
		     (unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
		     (unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto res_err;
	}

	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
			       IRQF_SHARED, "atmel-aes", aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto res_err;
	}

	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
	if (IS_ERR(aes_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(aes_dd->io_base);
		goto res_err;
	}

	err = atmel_aes_hw_version_init(aes_dd);
	if (err)
		goto res_err;

	atmel_aes_get_cap(aes_dd);

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_aes_buff;

	err = atmel_aes_dma_init(aes_dd, pdata);
	if (err)
		goto err_aes_dma;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
		 dma_chan_name(aes_dd->src.chan),
		 dma_chan_name(aes_dd->dst.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
aes_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;

	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);
	atmel_aes_buff_cleanup(aes_dd);

	return 0;
}
static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name		= "atmel_aes",
		.of_match_table	= of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");