4 * Support for ATMEL AES HW acceleration.
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7 * Author: Nicolas Royer <nicolas@eukrea.com>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
13 * Some ideas are from omap-aes.c driver.
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/err.h>
21 #include <linux/clk.h>
23 #include <linux/hw_random.h>
24 #include <linux/platform_device.h>
26 #include <linux/device.h>
27 #include <linux/init.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/irq.h>
31 #include <linux/scatterlist.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/of_device.h>
34 #include <linux/delay.h>
35 #include <linux/crypto.h>
36 #include <crypto/scatterwalk.h>
37 #include <crypto/algapi.h>
38 #include <crypto/aes.h>
39 #include <linux/platform_data/crypto-atmel.h>
40 #include <dt-bindings/dma/at91.h>
41 #include "atmel-aes-regs.h"
43 #define ATMEL_AES_PRIORITY 300
45 #define CFB8_BLOCK_SIZE 1
46 #define CFB16_BLOCK_SIZE 2
47 #define CFB32_BLOCK_SIZE 4
48 #define CFB64_BLOCK_SIZE 8
51 /* Reserve bits [18:16] [14:12] [0] for mode (same as for AES_MR) */
52 #define AES_FLAGS_ENCRYPT AES_MR_CYPHER_ENC
53 #define AES_FLAGS_OPMODE_MASK (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
54 #define AES_FLAGS_ECB AES_MR_OPMOD_ECB
55 #define AES_FLAGS_CBC AES_MR_OPMOD_CBC
56 #define AES_FLAGS_OFB AES_MR_OPMOD_OFB
57 #define AES_FLAGS_CFB128 (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
58 #define AES_FLAGS_CFB64 (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
59 #define AES_FLAGS_CFB32 (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
60 #define AES_FLAGS_CFB16 (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
61 #define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
62 #define AES_FLAGS_CTR AES_MR_OPMOD_CTR
64 #define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \
67 #define AES_FLAGS_INIT BIT(2)
68 #define AES_FLAGS_BUSY BIT(3)
69 #define AES_FLAGS_FAST BIT(5)
71 #define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY)
73 #define ATMEL_AES_QUEUE_LENGTH 50
75 #define ATMEL_AES_DMA_THRESHOLD 16
78 struct atmel_aes_caps
{
87 typedef int (*atmel_aes_fn_t
)(struct atmel_aes_dev
*);
90 struct atmel_aes_base_ctx
{
91 struct atmel_aes_dev
*dd
;
95 u32 key
[AES_KEYSIZE_256
/ sizeof(u32
)];
100 struct atmel_aes_ctx
{
101 struct atmel_aes_base_ctx base
;
104 struct atmel_aes_reqctx
{
108 struct atmel_aes_dma
{
109 struct dma_chan
*chan
;
110 struct dma_slave_config dma_conf
;
113 struct atmel_aes_dev
{
114 struct list_head list
;
115 unsigned long phys_base
;
116 void __iomem
*io_base
;
118 struct crypto_async_request
*areq
;
119 struct atmel_aes_base_ctx
*ctx
;
122 atmel_aes_fn_t resume
;
131 struct crypto_queue queue
;
133 struct tasklet_struct done_task
;
134 struct tasklet_struct queue_task
;
138 struct scatterlist
*in_sg
;
139 unsigned int nb_in_sg
;
141 struct scatterlist
*out_sg
;
142 unsigned int nb_out_sg
;
151 dma_addr_t dma_addr_in
;
152 struct atmel_aes_dma dma_lch_in
;
156 dma_addr_t dma_addr_out
;
157 struct atmel_aes_dma dma_lch_out
;
159 struct atmel_aes_caps caps
;
164 struct atmel_aes_drv
{
165 struct list_head dev_list
;
169 static struct atmel_aes_drv atmel_aes
= {
170 .dev_list
= LIST_HEAD_INIT(atmel_aes
.dev_list
),
171 .lock
= __SPIN_LOCK_UNLOCKED(atmel_aes
.lock
),
174 static int atmel_aes_sg_length(struct ablkcipher_request
*req
,
175 struct scatterlist
*sg
)
177 unsigned int total
= req
->nbytes
;
180 struct scatterlist
*sg_list
;
187 len
= min(sg_list
->length
, total
);
192 sg_list
= sg_next(sg_list
);
200 static int atmel_aes_sg_copy(struct scatterlist
**sg
, size_t *offset
,
201 void *buf
, size_t buflen
, size_t total
, int out
)
203 size_t count
, off
= 0;
205 while (buflen
&& total
) {
206 count
= min((*sg
)->length
- *offset
, total
);
207 count
= min(count
, buflen
);
212 scatterwalk_map_and_copy(buf
+ off
, *sg
, *offset
, count
, out
);
219 if (*offset
== (*sg
)->length
) {
231 static inline u32
atmel_aes_read(struct atmel_aes_dev
*dd
, u32 offset
)
233 return readl_relaxed(dd
->io_base
+ offset
);
236 static inline void atmel_aes_write(struct atmel_aes_dev
*dd
,
237 u32 offset
, u32 value
)
239 writel_relaxed(value
, dd
->io_base
+ offset
);
242 static void atmel_aes_read_n(struct atmel_aes_dev
*dd
, u32 offset
,
243 u32
*value
, int count
)
245 for (; count
--; value
++, offset
+= 4)
246 *value
= atmel_aes_read(dd
, offset
);
249 static void atmel_aes_write_n(struct atmel_aes_dev
*dd
, u32 offset
,
250 const u32
*value
, int count
)
252 for (; count
--; value
++, offset
+= 4)
253 atmel_aes_write(dd
, offset
, *value
);
256 static struct atmel_aes_dev
*atmel_aes_find_dev(struct atmel_aes_base_ctx
*ctx
)
258 struct atmel_aes_dev
*aes_dd
= NULL
;
259 struct atmel_aes_dev
*tmp
;
261 spin_lock_bh(&atmel_aes
.lock
);
263 list_for_each_entry(tmp
, &atmel_aes
.dev_list
, list
) {
272 spin_unlock_bh(&atmel_aes
.lock
);
277 static int atmel_aes_hw_init(struct atmel_aes_dev
*dd
)
281 err
= clk_prepare_enable(dd
->iclk
);
285 if (!(dd
->flags
& AES_FLAGS_INIT
)) {
286 atmel_aes_write(dd
, AES_CR
, AES_CR_SWRST
);
287 atmel_aes_write(dd
, AES_MR
, 0xE << AES_MR_CKEY_OFFSET
);
288 dd
->flags
|= AES_FLAGS_INIT
;
294 static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev
*dd
)
296 return atmel_aes_read(dd
, AES_HW_VERSION
) & 0x00000fff;
299 static int atmel_aes_hw_version_init(struct atmel_aes_dev
*dd
)
303 err
= atmel_aes_hw_init(dd
);
307 dd
->hw_version
= atmel_aes_get_version(dd
);
309 dev_info(dd
->dev
, "version: 0x%x\n", dd
->hw_version
);
311 clk_disable_unprepare(dd
->iclk
);
315 static inline void atmel_aes_set_mode(struct atmel_aes_dev
*dd
,
316 const struct atmel_aes_reqctx
*rctx
)
318 /* Clear all but persistent flags and set request flags. */
319 dd
->flags
= (dd
->flags
& AES_FLAGS_PERSISTENT
) | rctx
->mode
;
322 static inline int atmel_aes_complete(struct atmel_aes_dev
*dd
, int err
)
324 clk_disable_unprepare(dd
->iclk
);
325 dd
->flags
&= ~AES_FLAGS_BUSY
;
328 dd
->areq
->complete(dd
->areq
, err
);
330 tasklet_schedule(&dd
->queue_task
);
335 static void atmel_aes_dma_callback(void *data
)
337 struct atmel_aes_dev
*dd
= data
;
340 (void)dd
->resume(dd
);
343 static int atmel_aes_crypt_dma(struct atmel_aes_dev
*dd
,
344 dma_addr_t dma_addr_in
, dma_addr_t dma_addr_out
, int length
)
346 struct scatterlist sg
[2];
347 struct dma_async_tx_descriptor
*in_desc
, *out_desc
;
348 enum dma_slave_buswidth addr_width
;
351 switch (dd
->ctx
->block_size
) {
352 case CFB8_BLOCK_SIZE
:
353 addr_width
= DMA_SLAVE_BUSWIDTH_1_BYTE
;
357 case CFB16_BLOCK_SIZE
:
358 addr_width
= DMA_SLAVE_BUSWIDTH_2_BYTES
;
362 case CFB32_BLOCK_SIZE
:
363 case CFB64_BLOCK_SIZE
:
364 addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
369 addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
370 maxburst
= dd
->caps
.max_burst_size
;
377 dd
->dma_size
= length
;
379 dma_sync_single_for_device(dd
->dev
, dma_addr_in
, length
,
381 dma_sync_single_for_device(dd
->dev
, dma_addr_out
, length
,
384 dd
->dma_lch_in
.dma_conf
.dst_addr_width
= addr_width
;
385 dd
->dma_lch_in
.dma_conf
.src_maxburst
= maxburst
;
386 dd
->dma_lch_in
.dma_conf
.dst_maxburst
= maxburst
;
388 dd
->dma_lch_out
.dma_conf
.src_addr_width
= addr_width
;
389 dd
->dma_lch_out
.dma_conf
.src_maxburst
= maxburst
;
390 dd
->dma_lch_out
.dma_conf
.dst_maxburst
= maxburst
;
392 dmaengine_slave_config(dd
->dma_lch_in
.chan
, &dd
->dma_lch_in
.dma_conf
);
393 dmaengine_slave_config(dd
->dma_lch_out
.chan
, &dd
->dma_lch_out
.dma_conf
);
395 sg_init_table(&sg
[0], 1);
396 sg_dma_address(&sg
[0]) = dma_addr_in
;
397 sg_dma_len(&sg
[0]) = length
;
399 sg_init_table(&sg
[1], 1);
400 sg_dma_address(&sg
[1]) = dma_addr_out
;
401 sg_dma_len(&sg
[1]) = length
;
403 in_desc
= dmaengine_prep_slave_sg(dd
->dma_lch_in
.chan
, &sg
[0],
405 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
409 out_desc
= dmaengine_prep_slave_sg(dd
->dma_lch_out
.chan
, &sg
[1],
411 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
415 out_desc
->callback
= atmel_aes_dma_callback
;
416 out_desc
->callback_param
= dd
;
418 dmaengine_submit(out_desc
);
419 dma_async_issue_pending(dd
->dma_lch_out
.chan
);
421 dmaengine_submit(in_desc
);
422 dma_async_issue_pending(dd
->dma_lch_in
.chan
);
427 static int atmel_aes_cpu_complete(struct atmel_aes_dev
*dd
);
429 static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev
*dd
)
431 struct ablkcipher_request
*req
= ablkcipher_request_cast(dd
->areq
);
433 dma_sync_single_for_cpu(dd
->dev
, dd
->dma_addr_in
,
434 dd
->dma_size
, DMA_TO_DEVICE
);
435 dma_sync_single_for_cpu(dd
->dev
, dd
->dma_addr_out
,
436 dd
->dma_size
, DMA_FROM_DEVICE
);
438 /* use cache buffers */
439 dd
->nb_in_sg
= atmel_aes_sg_length(req
, dd
->in_sg
);
443 dd
->nb_out_sg
= atmel_aes_sg_length(req
, dd
->out_sg
);
447 dd
->bufcnt
= sg_copy_to_buffer(dd
->in_sg
, dd
->nb_in_sg
,
448 dd
->buf_in
, dd
->total
);
453 dd
->total
-= dd
->bufcnt
;
455 atmel_aes_write(dd
, AES_IER
, AES_INT_DATARDY
);
456 atmel_aes_write_n(dd
, AES_IDATAR(0), (u32
*) dd
->buf_in
,
459 dd
->resume
= atmel_aes_cpu_complete
;
463 static int atmel_aes_dma_complete(struct atmel_aes_dev
*dd
);
465 static int atmel_aes_crypt_dma_start(struct atmel_aes_dev
*dd
)
467 int err
, fast
= 0, in
, out
;
469 dma_addr_t addr_in
, addr_out
;
471 if ((!dd
->in_offset
) && (!dd
->out_offset
)) {
472 /* check for alignment */
473 in
= IS_ALIGNED((u32
)dd
->in_sg
->offset
, sizeof(u32
)) &&
474 IS_ALIGNED(dd
->in_sg
->length
, dd
->ctx
->block_size
);
475 out
= IS_ALIGNED((u32
)dd
->out_sg
->offset
, sizeof(u32
)) &&
476 IS_ALIGNED(dd
->out_sg
->length
, dd
->ctx
->block_size
);
479 if (sg_dma_len(dd
->in_sg
) != sg_dma_len(dd
->out_sg
))
485 count
= min_t(size_t, dd
->total
, sg_dma_len(dd
->in_sg
));
486 count
= min_t(size_t, count
, sg_dma_len(dd
->out_sg
));
488 err
= dma_map_sg(dd
->dev
, dd
->in_sg
, 1, DMA_TO_DEVICE
);
490 dev_err(dd
->dev
, "dma_map_sg() error\n");
494 err
= dma_map_sg(dd
->dev
, dd
->out_sg
, 1,
497 dev_err(dd
->dev
, "dma_map_sg() error\n");
498 dma_unmap_sg(dd
->dev
, dd
->in_sg
, 1,
503 addr_in
= sg_dma_address(dd
->in_sg
);
504 addr_out
= sg_dma_address(dd
->out_sg
);
506 dd
->flags
|= AES_FLAGS_FAST
;
509 dma_sync_single_for_cpu(dd
->dev
, dd
->dma_addr_in
,
510 dd
->dma_size
, DMA_TO_DEVICE
);
512 /* use cache buffers */
513 count
= atmel_aes_sg_copy(&dd
->in_sg
, &dd
->in_offset
,
514 dd
->buf_in
, dd
->buflen
, dd
->total
, 0);
516 addr_in
= dd
->dma_addr_in
;
517 addr_out
= dd
->dma_addr_out
;
519 dd
->flags
&= ~AES_FLAGS_FAST
;
524 err
= atmel_aes_crypt_dma(dd
, addr_in
, addr_out
, count
);
526 if (err
&& (dd
->flags
& AES_FLAGS_FAST
)) {
527 dma_unmap_sg(dd
->dev
, dd
->in_sg
, 1, DMA_TO_DEVICE
);
528 dma_unmap_sg(dd
->dev
, dd
->out_sg
, 1, DMA_TO_DEVICE
);
531 dd
->resume
= atmel_aes_dma_complete
;
532 return err
? : -EINPROGRESS
;
535 static void atmel_aes_write_ctrl(struct atmel_aes_dev
*dd
, bool use_dma
,
540 /* MR register must be set before IV registers */
541 if (dd
->ctx
->keylen
== AES_KEYSIZE_128
)
542 valmr
|= AES_MR_KEYSIZE_128
;
543 else if (dd
->ctx
->keylen
== AES_KEYSIZE_192
)
544 valmr
|= AES_MR_KEYSIZE_192
;
546 valmr
|= AES_MR_KEYSIZE_256
;
548 valmr
|= dd
->flags
& AES_FLAGS_MODE_MASK
;
551 valmr
|= AES_MR_SMOD_IDATAR0
;
552 if (dd
->caps
.has_dualbuff
)
553 valmr
|= AES_MR_DUALBUFF
;
555 valmr
|= AES_MR_SMOD_AUTO
;
558 atmel_aes_write(dd
, AES_MR
, valmr
);
560 atmel_aes_write_n(dd
, AES_KEYWR(0), dd
->ctx
->key
,
561 dd
->ctx
->keylen
>> 2);
563 if (iv
&& (valmr
& AES_MR_OPMOD_MASK
) != AES_MR_OPMOD_ECB
)
564 atmel_aes_write_n(dd
, AES_IVR(0), iv
, 4);
567 static int atmel_aes_handle_queue(struct atmel_aes_dev
*dd
,
568 struct crypto_async_request
*new_areq
)
570 struct crypto_async_request
*areq
, *backlog
;
571 struct atmel_aes_base_ctx
*ctx
;
575 spin_lock_irqsave(&dd
->lock
, flags
);
577 ret
= crypto_enqueue_request(&dd
->queue
, new_areq
);
578 if (dd
->flags
& AES_FLAGS_BUSY
) {
579 spin_unlock_irqrestore(&dd
->lock
, flags
);
582 backlog
= crypto_get_backlog(&dd
->queue
);
583 areq
= crypto_dequeue_request(&dd
->queue
);
585 dd
->flags
|= AES_FLAGS_BUSY
;
586 spin_unlock_irqrestore(&dd
->lock
, flags
);
592 backlog
->complete(backlog
, -EINPROGRESS
);
594 ctx
= crypto_tfm_ctx(areq
->tfm
);
598 dd
->is_async
= (areq
!= new_areq
);
600 err
= ctx
->start(dd
);
601 return (dd
->is_async
) ? ret
: err
;
604 static int atmel_aes_start(struct atmel_aes_dev
*dd
)
606 struct ablkcipher_request
*req
= ablkcipher_request_cast(dd
->areq
);
607 struct atmel_aes_reqctx
*rctx
;
611 /* assign new request to device */
612 dd
->total
= req
->nbytes
;
614 dd
->in_sg
= req
->src
;
616 dd
->out_sg
= req
->dst
;
618 rctx
= ablkcipher_request_ctx(req
);
619 atmel_aes_set_mode(dd
, rctx
);
621 err
= atmel_aes_hw_init(dd
);
623 use_dma
= (dd
->total
> ATMEL_AES_DMA_THRESHOLD
);
624 atmel_aes_write_ctrl(dd
, use_dma
, req
->info
);
626 err
= atmel_aes_crypt_dma_start(dd
);
628 err
= atmel_aes_crypt_cpu_start(dd
);
630 if (err
&& err
!= -EINPROGRESS
) {
631 /* aes_task will not finish it, so do it here */
632 return atmel_aes_complete(dd
, err
);
638 static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev
*dd
)
643 if (dd
->flags
& AES_FLAGS_FAST
) {
644 dma_unmap_sg(dd
->dev
, dd
->out_sg
, 1, DMA_FROM_DEVICE
);
645 dma_unmap_sg(dd
->dev
, dd
->in_sg
, 1, DMA_TO_DEVICE
);
647 dma_sync_single_for_cpu(dd
->dev
, dd
->dma_addr_out
,
648 dd
->dma_size
, DMA_FROM_DEVICE
);
651 count
= atmel_aes_sg_copy(&dd
->out_sg
, &dd
->out_offset
,
652 dd
->buf_out
, dd
->buflen
,
654 if (count
!= dd
->dma_size
) {
656 pr_err("not all data converted: %zu\n", count
);
664 static int atmel_aes_buff_init(struct atmel_aes_dev
*dd
)
668 dd
->buf_in
= (void *)__get_free_pages(GFP_KERNEL
, 0);
669 dd
->buf_out
= (void *)__get_free_pages(GFP_KERNEL
, 0);
670 dd
->buflen
= PAGE_SIZE
;
671 dd
->buflen
&= ~(AES_BLOCK_SIZE
- 1);
673 if (!dd
->buf_in
|| !dd
->buf_out
) {
674 dev_err(dd
->dev
, "unable to alloc pages.\n");
679 dd
->dma_addr_in
= dma_map_single(dd
->dev
, dd
->buf_in
,
680 dd
->buflen
, DMA_TO_DEVICE
);
681 if (dma_mapping_error(dd
->dev
, dd
->dma_addr_in
)) {
682 dev_err(dd
->dev
, "dma %zd bytes error\n", dd
->buflen
);
687 dd
->dma_addr_out
= dma_map_single(dd
->dev
, dd
->buf_out
,
688 dd
->buflen
, DMA_FROM_DEVICE
);
689 if (dma_mapping_error(dd
->dev
, dd
->dma_addr_out
)) {
690 dev_err(dd
->dev
, "dma %zd bytes error\n", dd
->buflen
);
698 dma_unmap_single(dd
->dev
, dd
->dma_addr_in
, dd
->buflen
,
702 free_page((unsigned long)dd
->buf_out
);
703 free_page((unsigned long)dd
->buf_in
);
705 pr_err("error: %d\n", err
);
709 static void atmel_aes_buff_cleanup(struct atmel_aes_dev
*dd
)
711 dma_unmap_single(dd
->dev
, dd
->dma_addr_out
, dd
->buflen
,
713 dma_unmap_single(dd
->dev
, dd
->dma_addr_in
, dd
->buflen
,
715 free_page((unsigned long)dd
->buf_out
);
716 free_page((unsigned long)dd
->buf_in
);
719 static int atmel_aes_crypt(struct ablkcipher_request
*req
, unsigned long mode
)
721 struct atmel_aes_base_ctx
*ctx
= crypto_ablkcipher_ctx(
722 crypto_ablkcipher_reqtfm(req
));
723 struct atmel_aes_reqctx
*rctx
= ablkcipher_request_ctx(req
);
724 struct atmel_aes_dev
*dd
;
726 switch (mode
& AES_FLAGS_OPMODE_MASK
) {
728 ctx
->block_size
= CFB8_BLOCK_SIZE
;
731 case AES_FLAGS_CFB16
:
732 ctx
->block_size
= CFB16_BLOCK_SIZE
;
735 case AES_FLAGS_CFB32
:
736 ctx
->block_size
= CFB32_BLOCK_SIZE
;
739 case AES_FLAGS_CFB64
:
740 ctx
->block_size
= CFB64_BLOCK_SIZE
;
744 ctx
->block_size
= AES_BLOCK_SIZE
;
748 dd
= atmel_aes_find_dev(ctx
);
754 return atmel_aes_handle_queue(dd
, &req
->base
);
757 static bool atmel_aes_filter(struct dma_chan
*chan
, void *slave
)
759 struct at_dma_slave
*sl
= slave
;
761 if (sl
&& sl
->dma_dev
== chan
->device
->dev
) {
769 static int atmel_aes_dma_init(struct atmel_aes_dev
*dd
,
770 struct crypto_platform_data
*pdata
)
776 dma_cap_set(DMA_SLAVE
, mask
);
778 /* Try to grab 2 DMA channels */
779 dd
->dma_lch_in
.chan
= dma_request_slave_channel_compat(mask
,
780 atmel_aes_filter
, &pdata
->dma_slave
->rxdata
, dd
->dev
, "tx");
781 if (!dd
->dma_lch_in
.chan
)
784 dd
->dma_lch_in
.dma_conf
.direction
= DMA_MEM_TO_DEV
;
785 dd
->dma_lch_in
.dma_conf
.dst_addr
= dd
->phys_base
+
787 dd
->dma_lch_in
.dma_conf
.src_maxburst
= dd
->caps
.max_burst_size
;
788 dd
->dma_lch_in
.dma_conf
.src_addr_width
=
789 DMA_SLAVE_BUSWIDTH_4_BYTES
;
790 dd
->dma_lch_in
.dma_conf
.dst_maxburst
= dd
->caps
.max_burst_size
;
791 dd
->dma_lch_in
.dma_conf
.dst_addr_width
=
792 DMA_SLAVE_BUSWIDTH_4_BYTES
;
793 dd
->dma_lch_in
.dma_conf
.device_fc
= false;
795 dd
->dma_lch_out
.chan
= dma_request_slave_channel_compat(mask
,
796 atmel_aes_filter
, &pdata
->dma_slave
->txdata
, dd
->dev
, "rx");
797 if (!dd
->dma_lch_out
.chan
)
800 dd
->dma_lch_out
.dma_conf
.direction
= DMA_DEV_TO_MEM
;
801 dd
->dma_lch_out
.dma_conf
.src_addr
= dd
->phys_base
+
803 dd
->dma_lch_out
.dma_conf
.src_maxburst
= dd
->caps
.max_burst_size
;
804 dd
->dma_lch_out
.dma_conf
.src_addr_width
=
805 DMA_SLAVE_BUSWIDTH_4_BYTES
;
806 dd
->dma_lch_out
.dma_conf
.dst_maxburst
= dd
->caps
.max_burst_size
;
807 dd
->dma_lch_out
.dma_conf
.dst_addr_width
=
808 DMA_SLAVE_BUSWIDTH_4_BYTES
;
809 dd
->dma_lch_out
.dma_conf
.device_fc
= false;
814 dma_release_channel(dd
->dma_lch_in
.chan
);
816 dev_warn(dd
->dev
, "no DMA channel available\n");
820 static void atmel_aes_dma_cleanup(struct atmel_aes_dev
*dd
)
822 dma_release_channel(dd
->dma_lch_in
.chan
);
823 dma_release_channel(dd
->dma_lch_out
.chan
);
826 static int atmel_aes_setkey(struct crypto_ablkcipher
*tfm
, const u8
*key
,
829 struct atmel_aes_base_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
831 if (keylen
!= AES_KEYSIZE_128
&& keylen
!= AES_KEYSIZE_192
&&
832 keylen
!= AES_KEYSIZE_256
) {
833 crypto_ablkcipher_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
837 memcpy(ctx
->key
, key
, keylen
);
838 ctx
->keylen
= keylen
;
843 static int atmel_aes_ecb_encrypt(struct ablkcipher_request
*req
)
845 return atmel_aes_crypt(req
, AES_FLAGS_ECB
| AES_FLAGS_ENCRYPT
);
848 static int atmel_aes_ecb_decrypt(struct ablkcipher_request
*req
)
850 return atmel_aes_crypt(req
, AES_FLAGS_ECB
);
853 static int atmel_aes_cbc_encrypt(struct ablkcipher_request
*req
)
855 return atmel_aes_crypt(req
,
856 AES_FLAGS_ENCRYPT
| AES_FLAGS_CBC
);
859 static int atmel_aes_cbc_decrypt(struct ablkcipher_request
*req
)
861 return atmel_aes_crypt(req
,
865 static int atmel_aes_ofb_encrypt(struct ablkcipher_request
*req
)
867 return atmel_aes_crypt(req
,
868 AES_FLAGS_ENCRYPT
| AES_FLAGS_OFB
);
871 static int atmel_aes_ofb_decrypt(struct ablkcipher_request
*req
)
873 return atmel_aes_crypt(req
,
877 static int atmel_aes_cfb_encrypt(struct ablkcipher_request
*req
)
879 return atmel_aes_crypt(req
, AES_FLAGS_CFB128
| AES_FLAGS_ENCRYPT
);
882 static int atmel_aes_cfb_decrypt(struct ablkcipher_request
*req
)
884 return atmel_aes_crypt(req
, AES_FLAGS_CFB128
);
887 static int atmel_aes_cfb64_encrypt(struct ablkcipher_request
*req
)
889 return atmel_aes_crypt(req
, AES_FLAGS_CFB64
| AES_FLAGS_ENCRYPT
);
892 static int atmel_aes_cfb64_decrypt(struct ablkcipher_request
*req
)
894 return atmel_aes_crypt(req
, AES_FLAGS_CFB64
);
897 static int atmel_aes_cfb32_encrypt(struct ablkcipher_request
*req
)
899 return atmel_aes_crypt(req
, AES_FLAGS_CFB32
| AES_FLAGS_ENCRYPT
);
902 static int atmel_aes_cfb32_decrypt(struct ablkcipher_request
*req
)
904 return atmel_aes_crypt(req
, AES_FLAGS_CFB32
);
907 static int atmel_aes_cfb16_encrypt(struct ablkcipher_request
*req
)
909 return atmel_aes_crypt(req
, AES_FLAGS_CFB16
| AES_FLAGS_ENCRYPT
);
912 static int atmel_aes_cfb16_decrypt(struct ablkcipher_request
*req
)
914 return atmel_aes_crypt(req
, AES_FLAGS_CFB16
);
917 static int atmel_aes_cfb8_encrypt(struct ablkcipher_request
*req
)
919 return atmel_aes_crypt(req
, AES_FLAGS_CFB8
| AES_FLAGS_ENCRYPT
);
922 static int atmel_aes_cfb8_decrypt(struct ablkcipher_request
*req
)
924 return atmel_aes_crypt(req
, AES_FLAGS_CFB8
);
927 static int atmel_aes_ctr_encrypt(struct ablkcipher_request
*req
)
929 return atmel_aes_crypt(req
,
930 AES_FLAGS_ENCRYPT
| AES_FLAGS_CTR
);
933 static int atmel_aes_ctr_decrypt(struct ablkcipher_request
*req
)
935 return atmel_aes_crypt(req
,
939 static int atmel_aes_cra_init(struct crypto_tfm
*tfm
)
941 struct atmel_aes_ctx
*ctx
= crypto_tfm_ctx(tfm
);
943 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct atmel_aes_reqctx
);
944 ctx
->base
.start
= atmel_aes_start
;
/* Transform-exit hook: nothing to release — contexts own no resources. */
static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}
953 static struct crypto_alg aes_algs
[] = {
955 .cra_name
= "ecb(aes)",
956 .cra_driver_name
= "atmel-ecb-aes",
957 .cra_priority
= ATMEL_AES_PRIORITY
,
958 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
959 .cra_blocksize
= AES_BLOCK_SIZE
,
960 .cra_ctxsize
= sizeof(struct atmel_aes_ctx
),
961 .cra_alignmask
= 0xf,
962 .cra_type
= &crypto_ablkcipher_type
,
963 .cra_module
= THIS_MODULE
,
964 .cra_init
= atmel_aes_cra_init
,
965 .cra_exit
= atmel_aes_cra_exit
,
966 .cra_u
.ablkcipher
= {
967 .min_keysize
= AES_MIN_KEY_SIZE
,
968 .max_keysize
= AES_MAX_KEY_SIZE
,
969 .setkey
= atmel_aes_setkey
,
970 .encrypt
= atmel_aes_ecb_encrypt
,
971 .decrypt
= atmel_aes_ecb_decrypt
,
975 .cra_name
= "cbc(aes)",
976 .cra_driver_name
= "atmel-cbc-aes",
977 .cra_priority
= ATMEL_AES_PRIORITY
,
978 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
979 .cra_blocksize
= AES_BLOCK_SIZE
,
980 .cra_ctxsize
= sizeof(struct atmel_aes_ctx
),
981 .cra_alignmask
= 0xf,
982 .cra_type
= &crypto_ablkcipher_type
,
983 .cra_module
= THIS_MODULE
,
984 .cra_init
= atmel_aes_cra_init
,
985 .cra_exit
= atmel_aes_cra_exit
,
986 .cra_u
.ablkcipher
= {
987 .min_keysize
= AES_MIN_KEY_SIZE
,
988 .max_keysize
= AES_MAX_KEY_SIZE
,
989 .ivsize
= AES_BLOCK_SIZE
,
990 .setkey
= atmel_aes_setkey
,
991 .encrypt
= atmel_aes_cbc_encrypt
,
992 .decrypt
= atmel_aes_cbc_decrypt
,
996 .cra_name
= "ofb(aes)",
997 .cra_driver_name
= "atmel-ofb-aes",
998 .cra_priority
= ATMEL_AES_PRIORITY
,
999 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1000 .cra_blocksize
= AES_BLOCK_SIZE
,
1001 .cra_ctxsize
= sizeof(struct atmel_aes_ctx
),
1002 .cra_alignmask
= 0xf,
1003 .cra_type
= &crypto_ablkcipher_type
,
1004 .cra_module
= THIS_MODULE
,
1005 .cra_init
= atmel_aes_cra_init
,
1006 .cra_exit
= atmel_aes_cra_exit
,
1007 .cra_u
.ablkcipher
= {
1008 .min_keysize
= AES_MIN_KEY_SIZE
,
1009 .max_keysize
= AES_MAX_KEY_SIZE
,
1010 .ivsize
= AES_BLOCK_SIZE
,
1011 .setkey
= atmel_aes_setkey
,
1012 .encrypt
= atmel_aes_ofb_encrypt
,
1013 .decrypt
= atmel_aes_ofb_decrypt
,
1017 .cra_name
= "cfb(aes)",
1018 .cra_driver_name
= "atmel-cfb-aes",
1019 .cra_priority
= ATMEL_AES_PRIORITY
,
1020 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1021 .cra_blocksize
= AES_BLOCK_SIZE
,
1022 .cra_ctxsize
= sizeof(struct atmel_aes_ctx
),
1023 .cra_alignmask
= 0xf,
1024 .cra_type
= &crypto_ablkcipher_type
,
1025 .cra_module
= THIS_MODULE
,
1026 .cra_init
= atmel_aes_cra_init
,
1027 .cra_exit
= atmel_aes_cra_exit
,
1028 .cra_u
.ablkcipher
= {
1029 .min_keysize
= AES_MIN_KEY_SIZE
,
1030 .max_keysize
= AES_MAX_KEY_SIZE
,
1031 .ivsize
= AES_BLOCK_SIZE
,
1032 .setkey
= atmel_aes_setkey
,
1033 .encrypt
= atmel_aes_cfb_encrypt
,
1034 .decrypt
= atmel_aes_cfb_decrypt
,
1038 .cra_name
= "cfb32(aes)",
1039 .cra_driver_name
= "atmel-cfb32-aes",
1040 .cra_priority
= ATMEL_AES_PRIORITY
,
1041 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1042 .cra_blocksize
= CFB32_BLOCK_SIZE
,
1043 .cra_ctxsize
= sizeof(struct atmel_aes_ctx
),
1044 .cra_alignmask
= 0x3,
1045 .cra_type
= &crypto_ablkcipher_type
,
1046 .cra_module
= THIS_MODULE
,
1047 .cra_init
= atmel_aes_cra_init
,
1048 .cra_exit
= atmel_aes_cra_exit
,
1049 .cra_u
.ablkcipher
= {
1050 .min_keysize
= AES_MIN_KEY_SIZE
,
1051 .max_keysize
= AES_MAX_KEY_SIZE
,
1052 .ivsize
= AES_BLOCK_SIZE
,
1053 .setkey
= atmel_aes_setkey
,
1054 .encrypt
= atmel_aes_cfb32_encrypt
,
1055 .decrypt
= atmel_aes_cfb32_decrypt
,
1059 .cra_name
= "cfb16(aes)",
1060 .cra_driver_name
= "atmel-cfb16-aes",
1061 .cra_priority
= ATMEL_AES_PRIORITY
,
1062 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1063 .cra_blocksize
= CFB16_BLOCK_SIZE
,
1064 .cra_ctxsize
= sizeof(struct atmel_aes_ctx
),
1065 .cra_alignmask
= 0x1,
1066 .cra_type
= &crypto_ablkcipher_type
,
1067 .cra_module
= THIS_MODULE
,
1068 .cra_init
= atmel_aes_cra_init
,
1069 .cra_exit
= atmel_aes_cra_exit
,
1070 .cra_u
.ablkcipher
= {
1071 .min_keysize
= AES_MIN_KEY_SIZE
,
1072 .max_keysize
= AES_MAX_KEY_SIZE
,
1073 .ivsize
= AES_BLOCK_SIZE
,
1074 .setkey
= atmel_aes_setkey
,
1075 .encrypt
= atmel_aes_cfb16_encrypt
,
1076 .decrypt
= atmel_aes_cfb16_decrypt
,
1080 .cra_name
= "cfb8(aes)",
1081 .cra_driver_name
= "atmel-cfb8-aes",
1082 .cra_priority
= ATMEL_AES_PRIORITY
,
1083 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1084 .cra_blocksize
= CFB8_BLOCK_SIZE
,
1085 .cra_ctxsize
= sizeof(struct atmel_aes_ctx
),
1086 .cra_alignmask
= 0x0,
1087 .cra_type
= &crypto_ablkcipher_type
,
1088 .cra_module
= THIS_MODULE
,
1089 .cra_init
= atmel_aes_cra_init
,
1090 .cra_exit
= atmel_aes_cra_exit
,
1091 .cra_u
.ablkcipher
= {
1092 .min_keysize
= AES_MIN_KEY_SIZE
,
1093 .max_keysize
= AES_MAX_KEY_SIZE
,
1094 .ivsize
= AES_BLOCK_SIZE
,
1095 .setkey
= atmel_aes_setkey
,
1096 .encrypt
= atmel_aes_cfb8_encrypt
,
1097 .decrypt
= atmel_aes_cfb8_decrypt
,
1101 .cra_name
= "ctr(aes)",
1102 .cra_driver_name
= "atmel-ctr-aes",
1103 .cra_priority
= ATMEL_AES_PRIORITY
,
1104 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1105 .cra_blocksize
= AES_BLOCK_SIZE
,
1106 .cra_ctxsize
= sizeof(struct atmel_aes_ctx
),
1107 .cra_alignmask
= 0xf,
1108 .cra_type
= &crypto_ablkcipher_type
,
1109 .cra_module
= THIS_MODULE
,
1110 .cra_init
= atmel_aes_cra_init
,
1111 .cra_exit
= atmel_aes_cra_exit
,
1112 .cra_u
.ablkcipher
= {
1113 .min_keysize
= AES_MIN_KEY_SIZE
,
1114 .max_keysize
= AES_MAX_KEY_SIZE
,
1115 .ivsize
= AES_BLOCK_SIZE
,
1116 .setkey
= atmel_aes_setkey
,
1117 .encrypt
= atmel_aes_ctr_encrypt
,
1118 .decrypt
= atmel_aes_ctr_decrypt
,
1123 static struct crypto_alg aes_cfb64_alg
= {
1124 .cra_name
= "cfb64(aes)",
1125 .cra_driver_name
= "atmel-cfb64-aes",
1126 .cra_priority
= ATMEL_AES_PRIORITY
,
1127 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1128 .cra_blocksize
= CFB64_BLOCK_SIZE
,
1129 .cra_ctxsize
= sizeof(struct atmel_aes_ctx
),
1130 .cra_alignmask
= 0x7,
1131 .cra_type
= &crypto_ablkcipher_type
,
1132 .cra_module
= THIS_MODULE
,
1133 .cra_init
= atmel_aes_cra_init
,
1134 .cra_exit
= atmel_aes_cra_exit
,
1135 .cra_u
.ablkcipher
= {
1136 .min_keysize
= AES_MIN_KEY_SIZE
,
1137 .max_keysize
= AES_MAX_KEY_SIZE
,
1138 .ivsize
= AES_BLOCK_SIZE
,
1139 .setkey
= atmel_aes_setkey
,
1140 .encrypt
= atmel_aes_cfb64_encrypt
,
1141 .decrypt
= atmel_aes_cfb64_decrypt
,
1145 static void atmel_aes_queue_task(unsigned long data
)
1147 struct atmel_aes_dev
*dd
= (struct atmel_aes_dev
*)data
;
1149 atmel_aes_handle_queue(dd
, NULL
);
1152 static void atmel_aes_done_task(unsigned long data
)
1154 struct atmel_aes_dev
*dd
= (struct atmel_aes_dev
*) data
;
1156 dd
->is_async
= true;
1157 (void)dd
->resume(dd
);
1160 static int atmel_aes_dma_complete(struct atmel_aes_dev
*dd
)
1164 err
= atmel_aes_crypt_dma_stop(dd
);
1165 if (dd
->total
&& !err
) {
1166 if (dd
->flags
& AES_FLAGS_FAST
) {
1167 dd
->in_sg
= sg_next(dd
->in_sg
);
1168 dd
->out_sg
= sg_next(dd
->out_sg
);
1169 if (!dd
->in_sg
|| !dd
->out_sg
)
1173 err
= atmel_aes_crypt_dma_start(dd
);
1174 if (!err
|| err
== -EINPROGRESS
)
1175 return -EINPROGRESS
; /* DMA started. Not fininishing. */
1178 return atmel_aes_complete(dd
, err
);
1181 static int atmel_aes_cpu_complete(struct atmel_aes_dev
*dd
)
1185 atmel_aes_read_n(dd
, AES_ODATAR(0), (u32
*) dd
->buf_out
,
1188 if (sg_copy_from_buffer(dd
->out_sg
, dd
->nb_out_sg
,
1189 dd
->buf_out
, dd
->bufcnt
))
1194 return atmel_aes_complete(dd
, err
);
1197 static irqreturn_t
atmel_aes_irq(int irq
, void *dev_id
)
1199 struct atmel_aes_dev
*aes_dd
= dev_id
;
1202 reg
= atmel_aes_read(aes_dd
, AES_ISR
);
1203 if (reg
& atmel_aes_read(aes_dd
, AES_IMR
)) {
1204 atmel_aes_write(aes_dd
, AES_IDR
, reg
);
1205 if (AES_FLAGS_BUSY
& aes_dd
->flags
)
1206 tasklet_schedule(&aes_dd
->done_task
);
1208 dev_warn(aes_dd
->dev
, "AES interrupt when no active requests.\n");
1215 static void atmel_aes_unregister_algs(struct atmel_aes_dev
*dd
)
1219 if (dd
->caps
.has_cfb64
)
1220 crypto_unregister_alg(&aes_cfb64_alg
);
1222 for (i
= 0; i
< ARRAY_SIZE(aes_algs
); i
++)
1223 crypto_unregister_alg(&aes_algs
[i
]);
1226 static int atmel_aes_register_algs(struct atmel_aes_dev
*dd
)
1230 for (i
= 0; i
< ARRAY_SIZE(aes_algs
); i
++) {
1231 err
= crypto_register_alg(&aes_algs
[i
]);
1236 if (dd
->caps
.has_cfb64
) {
1237 err
= crypto_register_alg(&aes_cfb64_alg
);
1239 goto err_aes_cfb64_alg
;
1245 i
= ARRAY_SIZE(aes_algs
);
1247 for (j
= 0; j
< i
; j
++)
1248 crypto_unregister_alg(&aes_algs
[j
]);
1253 static void atmel_aes_get_cap(struct atmel_aes_dev
*dd
)
1255 dd
->caps
.has_dualbuff
= 0;
1256 dd
->caps
.has_cfb64
= 0;
1257 dd
->caps
.max_burst_size
= 1;
1259 /* keep only major version number */
1260 switch (dd
->hw_version
& 0xff0) {
1262 dd
->caps
.has_dualbuff
= 1;
1263 dd
->caps
.has_cfb64
= 1;
1264 dd
->caps
.max_burst_size
= 4;
1267 dd
->caps
.has_dualbuff
= 1;
1268 dd
->caps
.has_cfb64
= 1;
1269 dd
->caps
.max_burst_size
= 4;
1272 dd
->caps
.has_dualbuff
= 1;
1273 dd
->caps
.has_cfb64
= 1;
1274 dd
->caps
.max_burst_size
= 4;
1280 "Unmanaged aes version, set minimum capabilities\n");
1285 #if defined(CONFIG_OF)
1286 static const struct of_device_id atmel_aes_dt_ids
[] = {
1287 { .compatible
= "atmel,at91sam9g46-aes" },
1290 MODULE_DEVICE_TABLE(of
, atmel_aes_dt_ids
);
1292 static struct crypto_platform_data
*atmel_aes_of_init(struct platform_device
*pdev
)
1294 struct device_node
*np
= pdev
->dev
.of_node
;
1295 struct crypto_platform_data
*pdata
;
1298 dev_err(&pdev
->dev
, "device node not found\n");
1299 return ERR_PTR(-EINVAL
);
1302 pdata
= devm_kzalloc(&pdev
->dev
, sizeof(*pdata
), GFP_KERNEL
);
1304 dev_err(&pdev
->dev
, "could not allocate memory for pdata\n");
1305 return ERR_PTR(-ENOMEM
);
1308 pdata
->dma_slave
= devm_kzalloc(&pdev
->dev
,
1309 sizeof(*(pdata
->dma_slave
)),
1311 if (!pdata
->dma_slave
) {
1312 dev_err(&pdev
->dev
, "could not allocate memory for dma_slave\n");
1313 devm_kfree(&pdev
->dev
, pdata
);
1314 return ERR_PTR(-ENOMEM
);
1320 static inline struct crypto_platform_data
*atmel_aes_of_init(struct platform_device
*pdev
)
1322 return ERR_PTR(-EINVAL
);
1326 static int atmel_aes_probe(struct platform_device
*pdev
)
1328 struct atmel_aes_dev
*aes_dd
;
1329 struct crypto_platform_data
*pdata
;
1330 struct device
*dev
= &pdev
->dev
;
1331 struct resource
*aes_res
;
1334 pdata
= pdev
->dev
.platform_data
;
1336 pdata
= atmel_aes_of_init(pdev
);
1337 if (IS_ERR(pdata
)) {
1338 err
= PTR_ERR(pdata
);
1343 if (!pdata
->dma_slave
) {
1348 aes_dd
= devm_kzalloc(&pdev
->dev
, sizeof(*aes_dd
), GFP_KERNEL
);
1349 if (aes_dd
== NULL
) {
1350 dev_err(dev
, "unable to alloc data struct.\n");
1357 platform_set_drvdata(pdev
, aes_dd
);
1359 INIT_LIST_HEAD(&aes_dd
->list
);
1360 spin_lock_init(&aes_dd
->lock
);
1362 tasklet_init(&aes_dd
->done_task
, atmel_aes_done_task
,
1363 (unsigned long)aes_dd
);
1364 tasklet_init(&aes_dd
->queue_task
, atmel_aes_queue_task
,
1365 (unsigned long)aes_dd
);
1367 crypto_init_queue(&aes_dd
->queue
, ATMEL_AES_QUEUE_LENGTH
);
1371 /* Get the base address */
1372 aes_res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1374 dev_err(dev
, "no MEM resource info\n");
1378 aes_dd
->phys_base
= aes_res
->start
;
1381 aes_dd
->irq
= platform_get_irq(pdev
, 0);
1382 if (aes_dd
->irq
< 0) {
1383 dev_err(dev
, "no IRQ resource info\n");
1388 err
= devm_request_irq(&pdev
->dev
, aes_dd
->irq
, atmel_aes_irq
,
1389 IRQF_SHARED
, "atmel-aes", aes_dd
);
1391 dev_err(dev
, "unable to request aes irq.\n");
1395 /* Initializing the clock */
1396 aes_dd
->iclk
= devm_clk_get(&pdev
->dev
, "aes_clk");
1397 if (IS_ERR(aes_dd
->iclk
)) {
1398 dev_err(dev
, "clock initialization failed.\n");
1399 err
= PTR_ERR(aes_dd
->iclk
);
1403 aes_dd
->io_base
= devm_ioremap_resource(&pdev
->dev
, aes_res
);
1404 if (!aes_dd
->io_base
) {
1405 dev_err(dev
, "can't ioremap\n");
1410 err
= atmel_aes_hw_version_init(aes_dd
);
1414 atmel_aes_get_cap(aes_dd
);
1416 err
= atmel_aes_buff_init(aes_dd
);
1420 err
= atmel_aes_dma_init(aes_dd
, pdata
);
1424 spin_lock(&atmel_aes
.lock
);
1425 list_add_tail(&aes_dd
->list
, &atmel_aes
.dev_list
);
1426 spin_unlock(&atmel_aes
.lock
);
1428 err
= atmel_aes_register_algs(aes_dd
);
1432 dev_info(dev
, "Atmel AES - Using %s, %s for DMA transfers\n",
1433 dma_chan_name(aes_dd
->dma_lch_in
.chan
),
1434 dma_chan_name(aes_dd
->dma_lch_out
.chan
));
1439 spin_lock(&atmel_aes
.lock
);
1440 list_del(&aes_dd
->list
);
1441 spin_unlock(&atmel_aes
.lock
);
1442 atmel_aes_dma_cleanup(aes_dd
);
1444 atmel_aes_buff_cleanup(aes_dd
);
1447 tasklet_kill(&aes_dd
->done_task
);
1448 tasklet_kill(&aes_dd
->queue_task
);
1450 dev_err(dev
, "initialization failed.\n");
1455 static int atmel_aes_remove(struct platform_device
*pdev
)
1457 static struct atmel_aes_dev
*aes_dd
;
1459 aes_dd
= platform_get_drvdata(pdev
);
1462 spin_lock(&atmel_aes
.lock
);
1463 list_del(&aes_dd
->list
);
1464 spin_unlock(&atmel_aes
.lock
);
1466 atmel_aes_unregister_algs(aes_dd
);
1468 tasklet_kill(&aes_dd
->done_task
);
1469 tasklet_kill(&aes_dd
->queue_task
);
1471 atmel_aes_dma_cleanup(aes_dd
);
1472 atmel_aes_buff_cleanup(aes_dd
);
1477 static struct platform_driver atmel_aes_driver
= {
1478 .probe
= atmel_aes_probe
,
1479 .remove
= atmel_aes_remove
,
1481 .name
= "atmel_aes",
1482 .of_match_table
= of_match_ptr(atmel_aes_dt_ids
),
1486 module_platform_driver(atmel_aes_driver
);
1488 MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
1489 MODULE_LICENSE("GPL v2");
1490 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");