/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"

#define CFB8_BLOCK_SIZE 1
#define CFB16_BLOCK_SIZE 2
#define CFB32_BLOCK_SIZE 4
#define CFB64_BLOCK_SIZE 8

/* AES flags */
#define AES_FLAGS_MODE_MASK 0x03ff
#define AES_FLAGS_ENCRYPT BIT(0)
#define AES_FLAGS_CBC BIT(1)
#define AES_FLAGS_CFB BIT(2)
#define AES_FLAGS_CFB8 BIT(3)
#define AES_FLAGS_CFB16 BIT(4)
#define AES_FLAGS_CFB32 BIT(5)
#define AES_FLAGS_CFB64 BIT(6)
#define AES_FLAGS_CFB128 BIT(7)
#define AES_FLAGS_OFB BIT(8)
#define AES_FLAGS_CTR BIT(9)

#define AES_FLAGS_INIT BIT(16)
#define AES_FLAGS_DMA BIT(17)
#define AES_FLAGS_BUSY BIT(18)
#define AES_FLAGS_FAST BIT(19)

#define ATMEL_AES_QUEUE_LENGTH 50

#define ATMEL_AES_DMA_THRESHOLD 16


struct atmel_aes_caps {
	bool has_dualbuff;
	bool has_cfb64;
	u32 max_burst_size;
};

struct atmel_aes_dev;

struct atmel_aes_ctx {
	struct atmel_aes_dev *dd;

	int keylen;
	u32 key[AES_KEYSIZE_256 / sizeof(u32)];

	u16 block_size;
};

struct atmel_aes_reqctx {
	unsigned long mode;
};

struct atmel_aes_dma {
	struct dma_chan *chan;
	struct dma_slave_config dma_conf;
};

struct atmel_aes_dev {
	struct list_head list;
	unsigned long phys_base;
	void __iomem *io_base;

	struct atmel_aes_ctx *ctx;
	struct device *dev;
	struct clk *iclk;
	int irq;

	unsigned long flags;
	int err;

	spinlock_t lock;
	struct crypto_queue queue;

	struct tasklet_struct done_task;
	struct tasklet_struct queue_task;

	struct ablkcipher_request *req;
	size_t total;

	struct scatterlist *in_sg;
	unsigned int nb_in_sg;
	size_t in_offset;
	struct scatterlist *out_sg;
	unsigned int nb_out_sg;
	size_t out_offset;

	size_t bufcnt;
	size_t buflen;
	size_t dma_size;

	void *buf_in;
	int dma_in;
	dma_addr_t dma_addr_in;
	struct atmel_aes_dma dma_lch_in;

	void *buf_out;
	int dma_out;
	dma_addr_t dma_addr_out;
	struct atmel_aes_dma dma_lch_out;

	struct atmel_aes_caps caps;

	u32 hw_version;
};

struct atmel_aes_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};

static int atmel_aes_sg_length(struct ablkcipher_request *req,
			struct scatterlist *sg)
{
	unsigned int total = req->nbytes;
	int sg_nb;
	unsigned int len;
	struct scatterlist *sg_list;

	sg_nb = 0;
	sg_list = sg;
	total = req->nbytes;

	while (total) {
		len = min(sg_list->length, total);

		sg_nb++;
		total -= len;

		sg_list = sg_next(sg_list);
		if (!sg_list)
			total = 0;
	}

	return sg_nb;
}

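/*
 * Copy up to buflen bytes between the scatterlist and a linear buffer,
 * advancing *sg and *offset as entries are consumed; 'out' selects the
 * copy direction. Returns the number of bytes actually copied.
 */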
static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
			void *buf, size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}

static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd = NULL;
	struct atmel_aes_dev *tmp;

	spin_lock_bh(&atmel_aes.lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			aes_dd = tmp;
			break;
		}
		ctx->dd = aes_dd;
	} else {
		aes_dd = ctx->dd;
	}

	spin_unlock_bh(&atmel_aes.lock);

	return aes_dd;
}

static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	clk_prepare_enable(dd->iclk);

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	atmel_aes_hw_init(dd);

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);
}

static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	clk_disable_unprepare(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

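/*
 * Queue one transfer of 'length' bytes on both DMA channels: pick the bus
 * width and burst size for the current mode, then submit a memory-to-device
 * descriptor for the input buffer and a device-to-memory descriptor for the
 * output buffer. Completion is signalled by the output channel callback.
 */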
static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
		dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor *in_desc, *out_desc;

	dd->dma_size = length;

	if (!(dd->flags & AES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	if (dd->flags & AES_FLAGS_CFB8) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else if (dd->flags & AES_FLAGS_CFB16) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
			AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
		dd->dma_lch_in.dma_conf.src_maxburst = 1;
		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
		dd->dma_lch_out.dma_conf.src_maxburst = 1;
		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	} else {
		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	}

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= AES_FLAGS_DMA;

	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	out_desc->callback = atmel_aes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}

static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
	dd->flags &= ~AES_FLAGS_DMA;

	/* use cache buffers */
	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
	if (!dd->nb_in_sg)
		return -EINVAL;

	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
	if (!dd->nb_out_sg)
		return -EINVAL;

	dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
					dd->buf_in, dd->total);

	if (!dd->bufcnt)
		return -EINVAL;

	dd->total -= dd->bufcnt;

	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
				dd->bufcnt >> 2);

	return 0;
}

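/*
 * Start a DMA transfer for the current request. When both scatterlists are
 * suitably aligned ("fast" path) they are mapped and used directly;
 * otherwise the data is staged through the pre-mapped bounce buffers.
 */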
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	if ((!dd->in_offset) && (!dd->out_offset)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
		fast = in && out;

		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
			fast = 0;
	}


	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1,
				DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1,
				DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= AES_FLAGS_FAST;

	} else {
		/* use cache buffers */
		count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
				dd->buf_in, dd->buflen, dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~AES_FLAGS_FAST;
	}

	dd->total -= count;

	err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);

	if (err && (dd->flags & AES_FLAGS_FAST)) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}

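/* Program key size, operating mode, key and (when used) IV into the block. */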
static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
{
	int err;
	u32 valcr = 0, valmr = 0;

	err = atmel_aes_hw_init(dd);

	if (err)
		return err;

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	if (dd->flags & AES_FLAGS_CBC) {
		valmr |= AES_MR_OPMOD_CBC;
	} else if (dd->flags & AES_FLAGS_CFB) {
		valmr |= AES_MR_OPMOD_CFB;
		if (dd->flags & AES_FLAGS_CFB8)
			valmr |= AES_MR_CFBS_8b;
		else if (dd->flags & AES_FLAGS_CFB16)
			valmr |= AES_MR_CFBS_16b;
		else if (dd->flags & AES_FLAGS_CFB32)
			valmr |= AES_MR_CFBS_32b;
		else if (dd->flags & AES_FLAGS_CFB64)
			valmr |= AES_MR_CFBS_64b;
		else if (dd->flags & AES_FLAGS_CFB128)
			valmr |= AES_MR_CFBS_128b;
	} else if (dd->flags & AES_FLAGS_OFB) {
		valmr |= AES_MR_OPMOD_OFB;
	} else if (dd->flags & AES_FLAGS_CTR) {
		valmr |= AES_MR_OPMOD_CTR;
	} else {
		valmr |= AES_MR_OPMOD_ECB;
	}

	if (dd->flags & AES_FLAGS_ENCRYPT)
		valmr |= AES_MR_CYPHER_ENC;

	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_CR, valcr);
	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
						dd->ctx->keylen >> 2);

	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
	   (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
	   dd->req->info) {
		atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
	}

	return 0;
}

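/*
 * Enqueue the request and, if the engine is idle, dequeue the next one and
 * start processing it (DMA above ATMEL_AES_DMA_THRESHOLD bytes, PIO below).
 */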
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_aes_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= AES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;
	ctx->dd = dd;

	err = atmel_aes_write_ctrl(dd);
	if (!err) {
		if (dd->total > ATMEL_AES_DMA_THRESHOLD)
			err = atmel_aes_crypt_dma_start(dd);
		else
			err = atmel_aes_crypt_cpu_start(dd);
	}
	if (err) {
		/* aes_task will not finish it, so do it here */
		atmel_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}

static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & AES_FLAGS_DMA) {
		err = 0;
		if (dd->flags & AES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				pr_err("not all data converted: %u\n", count);
			}
		}
	}

	return err;
}


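/* Allocate one page per direction and DMA-map them as bounce buffers. */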
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					dd->buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					dd->buflen, DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
err_map_in:
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
err_alloc:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
		DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}

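/*
 * Common entry point for all modes: check that the request length is a
 * multiple of the mode's block size, pick a device and queue the request.
 */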
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct atmel_aes_dev *dd;

	if (mode & AES_FLAGS_CFB8) {
		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB8 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB8_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB16) {
		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB16 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB16_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB32) {
		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB32 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB32_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB64) {
		if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB64 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB64_BLOCK_SIZE;
	} else {
		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of AES blocks\n");
			return -EINVAL;
		}
		ctx->block_size = AES_BLOCK_SIZE;
	}

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return atmel_aes_handle_queue(dd, req);
}

static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

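/* Request the two slave DMA channels: "tx" towards IDATAR, "rx" from ODATAR. */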
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
	struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan)
		goto err_dma_in;

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		AES_IDATAR(0);
	dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
	if (!dd->dma_lch_out.chan)
		goto err_dma_out;

	dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		AES_ODATAR(0);
	dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}

static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		0);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CTR);
}

static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);

	return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}

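/* Algorithms registered unconditionally; cfb64(aes) below depends on caps. */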
static struct crypto_alg aes_algs[] = {
{
	.cra_name = "ecb(aes)",
	.cra_driver_name = "atmel-ecb-aes",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
	.cra_alignmask = 0xf,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = atmel_aes_cra_init,
	.cra_exit = atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = atmel_aes_setkey,
		.encrypt = atmel_aes_ecb_encrypt,
		.decrypt = atmel_aes_ecb_decrypt,
	}
},
{
	.cra_name = "cbc(aes)",
	.cra_driver_name = "atmel-cbc-aes",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
	.cra_alignmask = 0xf,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = atmel_aes_cra_init,
	.cra_exit = atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = atmel_aes_setkey,
		.encrypt = atmel_aes_cbc_encrypt,
		.decrypt = atmel_aes_cbc_decrypt,
	}
},
{
	.cra_name = "ofb(aes)",
	.cra_driver_name = "atmel-ofb-aes",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
	.cra_alignmask = 0xf,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = atmel_aes_cra_init,
	.cra_exit = atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = atmel_aes_setkey,
		.encrypt = atmel_aes_ofb_encrypt,
		.decrypt = atmel_aes_ofb_decrypt,
	}
},
{
	.cra_name = "cfb(aes)",
	.cra_driver_name = "atmel-cfb-aes",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
	.cra_alignmask = 0xf,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = atmel_aes_cra_init,
	.cra_exit = atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = atmel_aes_setkey,
		.encrypt = atmel_aes_cfb_encrypt,
		.decrypt = atmel_aes_cfb_decrypt,
	}
},
{
	.cra_name = "cfb32(aes)",
	.cra_driver_name = "atmel-cfb32-aes",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = CFB32_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
	.cra_alignmask = 0x3,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = atmel_aes_cra_init,
	.cra_exit = atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = atmel_aes_setkey,
		.encrypt = atmel_aes_cfb32_encrypt,
		.decrypt = atmel_aes_cfb32_decrypt,
	}
},
{
	.cra_name = "cfb16(aes)",
	.cra_driver_name = "atmel-cfb16-aes",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = CFB16_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
	.cra_alignmask = 0x1,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = atmel_aes_cra_init,
	.cra_exit = atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = atmel_aes_setkey,
		.encrypt = atmel_aes_cfb16_encrypt,
		.decrypt = atmel_aes_cfb16_decrypt,
	}
},
{
	.cra_name = "cfb8(aes)",
	.cra_driver_name = "atmel-cfb8-aes",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = CFB8_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
	.cra_alignmask = 0x0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = atmel_aes_cra_init,
	.cra_exit = atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = atmel_aes_setkey,
		.encrypt = atmel_aes_cfb8_encrypt,
		.decrypt = atmel_aes_cfb8_decrypt,
	}
},
{
	.cra_name = "ctr(aes)",
	.cra_driver_name = "atmel-ctr-aes",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
	.cra_alignmask = 0xf,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = atmel_aes_cra_init,
	.cra_exit = atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = atmel_aes_setkey,
		.encrypt = atmel_aes_ctr_encrypt,
		.decrypt = atmel_aes_ctr_decrypt,
	}
},
};

static struct crypto_alg aes_cfb64_alg = {
	.cra_name = "cfb64(aes)",
	.cra_driver_name = "atmel-cfb64-aes",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = CFB64_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
	.cra_alignmask = 0x7,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = atmel_aes_cra_init,
	.cra_exit = atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = atmel_aes_setkey,
		.encrypt = atmel_aes_cfb64_encrypt,
		.decrypt = atmel_aes_cfb64_decrypt,
	}
};

static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

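/*
 * Bottom half run after an interrupt or DMA completion: read back PIO data
 * or stop DMA, then start the next chunk or complete the request.
 */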
static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
	int err;

	if (!(dd->flags & AES_FLAGS_DMA)) {
		atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
				dd->bufcnt >> 2);

		if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
			dd->buf_out, dd->bufcnt))
			err = 0;
		else
			err = -EINVAL;

		goto cpu_end;
	}

	err = atmel_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		if (dd->flags & AES_FLAGS_FAST) {
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

cpu_end:
	atmel_aes_finish_req(dd, err);
	atmel_aes_handle_queue(dd, NULL);
}

static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	return 0;

err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

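/* Derive hardware capabilities from the major IP version read at probe. */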
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.max_burst_size = 1;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
			"Unmanaged aes version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		devm_kfree(&pdev->dev, pdata);
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	return ERR_PTR(-EINVAL);
}
#endif
static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	unsigned long aes_phys_size;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmel_aes_of_init(pdev);
		if (IS_ERR(pdata)) {
			err = PTR_ERR(pdata);
			goto aes_dd_err;
		}
	}

	if (!pdata->dma_slave) {
		err = -ENXIO;
		goto aes_dd_err;
	}

	aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
	if (aes_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto aes_dd_err;
	}

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
					(unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
					(unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	aes_dd->irq = -1;

	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;
	aes_phys_size = resource_size(aes_res);

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto aes_irq_err;
	}

	err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
			aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto aes_irq_err;
	}

	/* Initializing the clock */
	aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto clk_err;
	}

	aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
	if (!aes_dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto aes_io_err;
	}

	atmel_aes_hw_version_init(aes_dd);

	atmel_aes_get_cap(aes_dd);

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_aes_buff;

	err = atmel_aes_dma_init(aes_dd, pdata);
	if (err)
		goto err_aes_dma;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
			dma_chan_name(aes_dd->dma_lch_in.chan),
			dma_chan_name(aes_dd->dma_lch_out.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
	iounmap(aes_dd->io_base);
aes_io_err:
	clk_put(aes_dd->iclk);
clk_err:
	free_irq(aes_dd->irq, aes_dd);
aes_irq_err:
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
	kfree(aes_dd);
	aes_dd = NULL;
aes_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int atmel_aes_remove(struct platform_device *pdev)
{
	static struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);

	iounmap(aes_dd->io_base);

	clk_put(aes_dd->iclk);

	if (aes_dd->irq > 0)
		free_irq(aes_dd->irq, aes_dd);

	kfree(aes_dd);
	aes_dd = NULL;

	return 0;
}

static struct platform_driver atmel_aes_driver = {
	.probe = atmel_aes_probe,
	.remove = atmel_aes_remove,
	.driver = {
		.name = "atmel_aes",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");