1/**
2 * AMCC SoC PPC4xx Crypto Driver
3 *
4 * Copyright (c) 2008 Applied Micro Circuits Corporation.
5 * All rights reserved. James Hsiao <jhsiao@amcc.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * This file implements the AMCC crypto offload Linux device driver for use
18 * with the Linux CryptoAPI.
19 */
20
21#include <linux/kernel.h>
22#include <linux/interrupt.h>
23#include <linux/spinlock_types.h>
24#include <linux/random.h>
25#include <linux/scatterlist.h>
26#include <linux/crypto.h>
27#include <linux/dma-mapping.h>
28#include <linux/platform_device.h>
29#include <linux/init.h>
30#include <linux/module.h>
31#include <linux/of_address.h>
32#include <linux/of_irq.h>
33#include <linux/of_platform.h>
34#include <linux/slab.h>
35#include <asm/dcr.h>
36#include <asm/dcr-regs.h>
37#include <asm/cacheflush.h>
38#include <crypto/aes.h>
39#include <crypto/sha.h>
40#include "crypto4xx_reg_def.h"
41#include "crypto4xx_core.h"
42#include "crypto4xx_sa.h"
43#include "crypto4xx_trng.h"
44
45#define PPC4XX_SEC_VERSION_STR "0.5"
46
47/**
48 * PPC4xx Crypto Engine Initialization Routine
49 */
50static void crypto4xx_hw_init(struct crypto4xx_device *dev)
51{
52 union ce_ring_size ring_size;
53 union ce_ring_contol ring_ctrl;
54 union ce_part_ring_size part_ring_size;
55 union ce_io_threshold io_threshold;
56 u32 rand_num;
57 union ce_pe_dma_cfg pe_dma_cfg;
 58	u32 device_ctrl;
59
60 writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
 61	/* set up PE DMA: assert reset of SG, PDR and PE, then release the reset */
62 pe_dma_cfg.w = 0;
63 pe_dma_cfg.bf.bo_sgpd_en = 1;
64 pe_dma_cfg.bf.bo_data_en = 0;
65 pe_dma_cfg.bf.bo_sa_en = 1;
66 pe_dma_cfg.bf.bo_pd_en = 1;
67 pe_dma_cfg.bf.dynamic_sa_en = 1;
68 pe_dma_cfg.bf.reset_sg = 1;
69 pe_dma_cfg.bf.reset_pdr = 1;
70 pe_dma_cfg.bf.reset_pe = 1;
71 writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
 72	/* take SG, PDR and PE out of reset (pe_mode still 0) */
73 pe_dma_cfg.bf.pe_mode = 0;
74 pe_dma_cfg.bf.reset_sg = 0;
75 pe_dma_cfg.bf.reset_pdr = 0;
76 pe_dma_cfg.bf.reset_pe = 0;
77 pe_dma_cfg.bf.bo_td_en = 0;
78 writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
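	/*
	 * The packet descriptor ring doubles as the result ring: both the
	 * PDR and the RDR base registers below are pointed at the same DMA
	 * area that crypto4xx_build_pdr() allocated.
	 */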
79 writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
80 writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
81 writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
82 get_random_bytes(&rand_num, sizeof(rand_num));
83 writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
84 get_random_bytes(&rand_num, sizeof(rand_num));
85 writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
86 ring_size.w = 0;
87 ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
88 ring_size.bf.ring_size = PPC4XX_NUM_PD;
89 writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
90 ring_ctrl.w = 0;
91 writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
92 device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
93 device_ctrl |= PPC4XX_DC_3DES_EN;
94 writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
95 writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
96 writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
97 part_ring_size.w = 0;
98 part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
99 part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
100 writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
101 writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
102 io_threshold.w = 0;
103 io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
104 io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
105 writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
106 writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
107 writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
108 writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
109 writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
110 writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
111 writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
112 writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
 113	/* now turn the packet engine on (pe_mode = 1); SG and PDR stay out of reset */
114 pe_dma_cfg.bf.pe_mode = 1;
115 pe_dma_cfg.bf.reset_sg = 0;
116 pe_dma_cfg.bf.reset_pdr = 0;
117 pe_dma_cfg.bf.reset_pe = 0;
118 pe_dma_cfg.bf.bo_td_en = 0;
119 writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
 121	/* clear all pending interrupts */
121 writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
122 writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
123 writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
124 writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
125 writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
126}
127
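/*
 * The size argument and ctx->sa_len are counted in 32-bit words, so the
 * engine-visible buffers below are size * 4 bytes long.  Both the inbound
 * and the outbound SA come from coherent DMA memory and are zeroed here;
 * a caller (e.g. a setkey handler in crypto4xx_alg.c) pairs this with
 * crypto4xx_free_sa(), roughly:
 *
 *	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
 *		crypto4xx_free_sa(ctx);
 *	rc = crypto4xx_alloc_sa(ctx, sa_words);  (sa_words: illustrative name)
 *	if (rc)
 *		return rc;
 */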
128int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
129{
130 ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
131 &ctx->sa_in_dma_addr, GFP_ATOMIC);
132 if (ctx->sa_in == NULL)
133 return -ENOMEM;
134
135 ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
136 &ctx->sa_out_dma_addr, GFP_ATOMIC);
137 if (ctx->sa_out == NULL) {
138 dma_free_coherent(ctx->dev->core_dev->device,
139 ctx->sa_len * 4,
140 ctx->sa_in, ctx->sa_in_dma_addr);
141 return -ENOMEM;
142 }
143
144 memset(ctx->sa_in, 0, size * 4);
145 memset(ctx->sa_out, 0, size * 4);
146 ctx->sa_len = size;
147
148 return 0;
149}
150
151void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
152{
153 if (ctx->sa_in != NULL)
154 dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
155 ctx->sa_in, ctx->sa_in_dma_addr);
156 if (ctx->sa_out != NULL)
157 dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
158 ctx->sa_out, ctx->sa_out_dma_addr);
159
160 ctx->sa_in_dma_addr = 0;
161 ctx->sa_out_dma_addr = 0;
162 ctx->sa_len = 0;
163}
164
165u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
166{
167 ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
168 sizeof(struct sa_state_record),
169 &ctx->state_record_dma_addr, GFP_ATOMIC);
170 if (!ctx->state_record_dma_addr)
171 return -ENOMEM;
172 memset(ctx->state_record, 0, sizeof(struct sa_state_record));
173
174 return 0;
175}
176
177void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
178{
179 if (ctx->state_record != NULL)
180 dma_free_coherent(ctx->dev->core_dev->device,
181 sizeof(struct sa_state_record),
182 ctx->state_record,
183 ctx->state_record_dma_addr);
184 ctx->state_record_dma_addr = 0;
185}
186
187/**
188 * allocate memory for the packet descriptor ring (PDR) and the
189 * per-descriptor host bookkeeping, plus the shadow SA and state
190 * record pools that back each PD entry
191 */
192static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
193{
194 int i;
195 struct pd_uinfo *pd_uinfo;
196 dev->pdr = dma_alloc_coherent(dev->core_dev->device,
197 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
198 &dev->pdr_pa, GFP_ATOMIC);
199 if (!dev->pdr)
200 return -ENOMEM;
201
202 dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
203 GFP_KERNEL);
204 if (!dev->pdr_uinfo) {
205 dma_free_coherent(dev->core_dev->device,
206 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
207 dev->pdr,
208 dev->pdr_pa);
209 return -ENOMEM;
210 }
211 memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
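	/*
	 * Reserve one 256 byte shadow SA and one state record per PD entry,
	 * so crypto4xx_build_pd() can patch a per-request copy of the SA
	 * (e.g. to insert the IV and the state record pointer) without
	 * allocating memory in the hot path.
	 */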
212 dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
213 256 * PPC4XX_NUM_PD,
214 &dev->shadow_sa_pool_pa,
215 GFP_ATOMIC);
216 if (!dev->shadow_sa_pool)
217 return -ENOMEM;
218
219 dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
220 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
221 &dev->shadow_sr_pool_pa, GFP_ATOMIC);
222 if (!dev->shadow_sr_pool)
223 return -ENOMEM;
224 for (i = 0; i < PPC4XX_NUM_PD; i++) {
225 pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
226 sizeof(struct pd_uinfo) * i);
227
228 /* alloc 256 bytes which is enough for any kind of dynamic sa */
229 pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
230 pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
231
232 /* alloc state record */
233 pd_uinfo->sr_va = dev->shadow_sr_pool +
234 sizeof(struct sa_state_record) * i;
235 pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
236 sizeof(struct sa_state_record) * i;
237 }
238
239 return 0;
240}
241
242static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
243{
244 if (dev->pdr != NULL)
245 dma_free_coherent(dev->core_dev->device,
246 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
247 dev->pdr, dev->pdr_pa);
248 if (dev->shadow_sa_pool)
249 dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
250 dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
251 if (dev->shadow_sr_pool)
252 dma_free_coherent(dev->core_dev->device,
253 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
254 dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
255
256 kfree(dev->pdr_uinfo);
257}
258
259static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
260{
261 u32 retval;
262 u32 tmp;
263
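	/*
	 * One slot is always left unused: the ring counts as full when
	 * advancing the head would make it catch up with the tail, which
	 * keeps "full" distinguishable from "empty" (head == tail).
	 */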
264 retval = dev->pdr_head;
265 tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
266
267 if (tmp == dev->pdr_tail)
268 return ERING_WAS_FULL;
269
270 dev->pdr_head = tmp;
271
272 return retval;
273}
274
275static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
276{
277 struct pd_uinfo *pd_uinfo;
278 unsigned long flags;
279
280 pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
281 sizeof(struct pd_uinfo) * idx);
282 spin_lock_irqsave(&dev->core_dev->lock, flags);
283 if (dev->pdr_tail != PPC4XX_LAST_PD)
284 dev->pdr_tail++;
285 else
286 dev->pdr_tail = 0;
287 pd_uinfo->state = PD_ENTRY_FREE;
288 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
289
290 return 0;
291}
292
293static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
294 dma_addr_t *pd_dma, u32 idx)
295{
296 *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
297
298 return dev->pdr + sizeof(struct ce_pd) * idx;
299}
300
301/**
302 * allocate memory for the gather descriptor ring (GDR);
303 * no data buffers are needed, since the gather descriptors point at the
304 * caller's source pages, and gdr_head/gdr_tail start out at zero
305 */
306static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
307{
308 dev->gdr = dma_alloc_coherent(dev->core_dev->device,
309 sizeof(struct ce_gd) * PPC4XX_NUM_GD,
310 &dev->gdr_pa, GFP_ATOMIC);
311 if (!dev->gdr)
312 return -ENOMEM;
313
314 memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
315
316 return 0;
317}
318
319static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
320{
321 dma_free_coherent(dev->core_dev->device,
322 sizeof(struct ce_gd) * PPC4XX_NUM_GD,
323 dev->gdr, dev->gdr_pa);
324}
325
326/*
327 * the caller must have preemption or interrupts disabled when
328 * calling this function
329 */
330u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
331{
332 u32 retval;
333 u32 tmp;
334 if (n >= PPC4XX_NUM_GD)
335 return ERING_WAS_FULL;
336
337 retval = dev->gdr_head;
338 tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
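	/*
	 * Make sure the n entries starting at gdr_head do not run into
	 * gdr_tail, taking wrap-around at PPC4XX_NUM_GD into account;
	 * head == tail means the ring is empty, so no check is needed then.
	 */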
339 if (dev->gdr_head > dev->gdr_tail) {
340 if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
341 return ERING_WAS_FULL;
342 } else if (dev->gdr_head < dev->gdr_tail) {
343 if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
344 return ERING_WAS_FULL;
345 }
346 dev->gdr_head = tmp;
347
348 return retval;
349}
350
351static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
352{
353 unsigned long flags;
354
355 spin_lock_irqsave(&dev->core_dev->lock, flags);
356 if (dev->gdr_tail == dev->gdr_head) {
357 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
358 return 0;
359 }
360
361 if (dev->gdr_tail != PPC4XX_LAST_GD)
362 dev->gdr_tail++;
363 else
364 dev->gdr_tail = 0;
365
366 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
367
368 return 0;
369}
370
371static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
372 dma_addr_t *gd_dma, u32 idx)
373{
374 *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
375
376 return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
377}
378
379/**
380 * allocate memory for the scatter descriptor ring (SDR) together
381 * with the fixed-size scatter buffers each descriptor points at;
382 * sdr_head and sdr_tail start out at zero
383 */
384static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
385{
386 int i;
387 struct ce_sd *sd_array;
388
389 /* alloc memory for scatter descriptor ring */
390 dev->sdr = dma_alloc_coherent(dev->core_dev->device,
391 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
392 &dev->sdr_pa, GFP_ATOMIC);
393 if (!dev->sdr)
394 return -ENOMEM;
395
396 dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
397 dev->scatter_buffer_va =
398 dma_alloc_coherent(dev->core_dev->device,
399 dev->scatter_buffer_size * PPC4XX_NUM_SD,
400 &dev->scatter_buffer_pa, GFP_ATOMIC);
401 if (!dev->scatter_buffer_va) {
402 dma_free_coherent(dev->core_dev->device,
403 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
404 dev->sdr, dev->sdr_pa);
405 return -ENOMEM;
406 }
407
408 sd_array = dev->sdr;
409
410 for (i = 0; i < PPC4XX_NUM_SD; i++) {
411 sd_array[i].ptr = dev->scatter_buffer_pa +
412 dev->scatter_buffer_size * i;
413 }
414
415 return 0;
416}
417
418static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
419{
420 if (dev->sdr != NULL)
421 dma_free_coherent(dev->core_dev->device,
422 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
423 dev->sdr, dev->sdr_pa);
424
425 if (dev->scatter_buffer_va != NULL)
426 dma_free_coherent(dev->core_dev->device,
427 dev->scatter_buffer_size * PPC4XX_NUM_SD,
428 dev->scatter_buffer_va,
429 dev->scatter_buffer_pa);
430}
431
432/*
433 * the caller must have preemption or interrupts disabled when
434 * calling this function
435 */
436static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
437{
438 u32 retval;
439 u32 tmp;
440
441 if (n >= PPC4XX_NUM_SD)
442 return ERING_WAS_FULL;
443
444 retval = dev->sdr_head;
445 tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
 446	if (dev->sdr_head > dev->sdr_tail) {
447 if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
448 return ERING_WAS_FULL;
449 } else if (dev->sdr_head < dev->sdr_tail) {
450 if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
451 return ERING_WAS_FULL;
 452	} /* head == tail means the ring is empty, so no further check is needed */
453 dev->sdr_head = tmp;
454
455 return retval;
456}
457
458static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
459{
460 unsigned long flags;
461
462 spin_lock_irqsave(&dev->core_dev->lock, flags);
463 if (dev->sdr_tail == dev->sdr_head) {
464 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
465 return 0;
466 }
467 if (dev->sdr_tail != PPC4XX_LAST_SD)
468 dev->sdr_tail++;
469 else
470 dev->sdr_tail = 0;
471 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
472
473 return 0;
474}
475
476static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
477 dma_addr_t *sd_dma, u32 idx)
478{
479 *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
480
481 return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
482}
483
484static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
485 dma_addr_t *addr, u32 *length,
486 u32 *idx, u32 *offset, u32 *nbytes)
487{
488 u32 len;
489
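	/*
	 * Copy data the engine left in the scatter buffers back into the
	 * destination chunk at *addr.  Three cases follow: the chunk still
	 * needs more than one scatter buffer (return 1 so the caller loops),
	 * it needs less than a full buffer, or it ends exactly on a buffer
	 * boundary.
	 */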
490 if (*length > dev->scatter_buffer_size) {
491 memcpy(phys_to_virt(*addr),
492 dev->scatter_buffer_va +
493 *idx * dev->scatter_buffer_size + *offset,
494 dev->scatter_buffer_size);
495 *offset = 0;
496 *length -= dev->scatter_buffer_size;
497 *nbytes -= dev->scatter_buffer_size;
498 if (*idx == PPC4XX_LAST_SD)
499 *idx = 0;
500 else
501 (*idx)++;
502 *addr = *addr + dev->scatter_buffer_size;
503 return 1;
504 } else if (*length < dev->scatter_buffer_size) {
505 memcpy(phys_to_virt(*addr),
506 dev->scatter_buffer_va +
507 *idx * dev->scatter_buffer_size + *offset, *length);
508 if ((*offset + *length) == dev->scatter_buffer_size) {
509 if (*idx == PPC4XX_LAST_SD)
510 *idx = 0;
511 else
512 (*idx)++;
513 *nbytes -= *length;
514 *offset = 0;
515 } else {
516 *nbytes -= *length;
517 *offset += *length;
518 }
519
520 return 0;
521 } else {
522 len = (*nbytes <= dev->scatter_buffer_size) ?
523 (*nbytes) : dev->scatter_buffer_size;
524 memcpy(phys_to_virt(*addr),
525 dev->scatter_buffer_va +
526 *idx * dev->scatter_buffer_size + *offset,
527 len);
528 *offset = 0;
529 *nbytes -= len;
530
531 if (*idx == PPC4XX_LAST_SD)
532 *idx = 0;
533 else
534 (*idx)++;
535
536 return 0;
537 }
538}
539
540static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
541 struct ce_pd *pd,
542 struct pd_uinfo *pd_uinfo,
543 u32 nbytes,
544 struct scatterlist *dst)
545{
546 dma_addr_t addr;
547 u32 this_sd;
548 u32 offset;
549 u32 len;
550 u32 i;
551 u32 sg_len;
552 struct scatterlist *sg;
553
554 this_sd = pd_uinfo->first_sd;
555 offset = 0;
556 i = 0;
557
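	/*
	 * Walk the destination scatterlist and drain the scatter buffers the
	 * engine filled, starting at the first SD reserved for this packet
	 * descriptor (pd_uinfo->first_sd).
	 */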
558 while (nbytes) {
559 sg = &dst[i];
560 sg_len = sg->length;
561 addr = dma_map_page(dev->core_dev->device, sg_page(sg),
562 sg->offset, sg->length, DMA_TO_DEVICE);
563
564 if (offset == 0) {
565 len = (nbytes <= sg->length) ? nbytes : sg->length;
566 while (crypto4xx_fill_one_page(dev, &addr, &len,
567 &this_sd, &offset, &nbytes))
568 ;
569 if (!nbytes)
570 return;
571 i++;
572 } else {
573 len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
574 nbytes : (dev->scatter_buffer_size - offset);
575 len = (sg->length < len) ? sg->length : len;
576 while (crypto4xx_fill_one_page(dev, &addr, &len,
577 &this_sd, &offset, &nbytes))
578 ;
579 if (!nbytes)
580 return;
581 sg_len -= len;
582 if (sg_len) {
583 addr += len;
584 while (crypto4xx_fill_one_page(dev, &addr,
585 &sg_len, &this_sd, &offset, &nbytes))
586 ;
587 }
588 i++;
589 }
590 }
591}
592
593static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
594 struct crypto4xx_ctx *ctx)
595{
596 struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
597 struct sa_state_record *state_record =
598 (struct sa_state_record *) pd_uinfo->sr_va;
599
600 if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
601 memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
602 SA_HASH_ALG_SHA1_DIGEST_SIZE);
603 }
604
605 return 0;
606}
607
608static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
609 struct pd_uinfo *pd_uinfo)
610{
611 int i;
612 if (pd_uinfo->num_gd) {
613 for (i = 0; i < pd_uinfo->num_gd; i++)
614 crypto4xx_put_gd_to_gdr(dev);
615 pd_uinfo->first_gd = 0xffffffff;
616 pd_uinfo->num_gd = 0;
617 }
618 if (pd_uinfo->num_sd) {
619 for (i = 0; i < pd_uinfo->num_sd; i++)
620 crypto4xx_put_sd_to_sdr(dev);
621
622 pd_uinfo->first_sd = 0xffffffff;
623 pd_uinfo->num_sd = 0;
624 }
625}
626
627static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
628 struct pd_uinfo *pd_uinfo,
629 struct ce_pd *pd)
630{
631 struct crypto4xx_ctx *ctx;
632 struct ablkcipher_request *ablk_req;
633 struct scatterlist *dst;
634 dma_addr_t addr;
635
636 ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
637 ctx = crypto_tfm_ctx(ablk_req->base.tfm);
638
639 if (pd_uinfo->using_sd) {
640 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
641 ablk_req->dst);
642 } else {
643 dst = pd_uinfo->dest_va;
644 addr = dma_map_page(dev->core_dev->device, sg_page(dst),
645 dst->offset, dst->length, DMA_FROM_DEVICE);
646 }
647 crypto4xx_ret_sg_desc(dev, pd_uinfo);
648 if (ablk_req->base.complete != NULL)
649 ablk_req->base.complete(&ablk_req->base, 0);
650
651 return 0;
652}
653
654static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
655 struct pd_uinfo *pd_uinfo)
656{
657 struct crypto4xx_ctx *ctx;
658 struct ahash_request *ahash_req;
659
660 ahash_req = ahash_request_cast(pd_uinfo->async_req);
661 ctx = crypto_tfm_ctx(ahash_req->base.tfm);
662
663 crypto4xx_copy_digest_to_dst(pd_uinfo,
664 crypto_tfm_ctx(ahash_req->base.tfm));
665 crypto4xx_ret_sg_desc(dev, pd_uinfo);
 666	/* call the user-provided completion callback */
667 if (ahash_req->base.complete != NULL)
668 ahash_req->base.complete(&ahash_req->base, 0);
669
670 return 0;
671}
672
673static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
674{
675 struct ce_pd *pd;
676 struct pd_uinfo *pd_uinfo;
677
678 pd = dev->pdr + sizeof(struct ce_pd)*idx;
679 pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
680 if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
681 CRYPTO_ALG_TYPE_ABLKCIPHER)
682 return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
683 else
684 return crypto4xx_ahash_done(dev, pd_uinfo);
685}
686
687/**
688 * Note: only use this function to copy items that are word aligned.
689 */
690void crypto4xx_memcpy_le(unsigned int *dst,
691 const unsigned char *buf,
692 int len)
693{
694 u8 *tmp;
695 for (; len >= 4; buf += 4, len -= 4)
696 *dst++ = cpu_to_le32(*(unsigned int *) buf);
697
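	/*
	 * The remaining 1-3 bytes are stored as one final 32-bit word,
	 * byte-swapped and zero-padded at the most significant end, matching
	 * what cpu_to_le32() did for the full words above.
	 */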
698 tmp = (u8 *)dst;
699 switch (len) {
700 case 3:
701 *tmp++ = 0;
702 *tmp++ = *(buf+2);
703 *tmp++ = *(buf+1);
704 *tmp++ = *buf;
705 break;
706 case 2:
707 *tmp++ = 0;
708 *tmp++ = 0;
709 *tmp++ = *(buf+1);
710 *tmp++ = *buf;
711 break;
712 case 1:
713 *tmp++ = 0;
714 *tmp++ = 0;
715 *tmp++ = 0;
716 *tmp++ = *buf;
717 break;
718 default:
719 break;
720 }
721}
722
723static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
724{
725 crypto4xx_destroy_pdr(core_dev->dev);
726 crypto4xx_destroy_gdr(core_dev->dev);
727 crypto4xx_destroy_sdr(core_dev->dev);
728 iounmap(core_dev->dev->ce_base);
729 kfree(core_dev->dev);
730 kfree(core_dev);
731}
732
733void crypto4xx_return_pd(struct crypto4xx_device *dev,
734 u32 pd_entry, struct ce_pd *pd,
735 struct pd_uinfo *pd_uinfo)
736{
737 /* irq should be already disabled */
738 dev->pdr_head = pd_entry;
739 pd->pd_ctl.w = 0;
740 pd->pd_ctl_len.w = 0;
741 pd_uinfo->state = PD_ENTRY_FREE;
742}
743
744static u32 get_next_gd(u32 current)
745{
746 if (current != PPC4XX_LAST_GD)
747 return current + 1;
748 else
749 return 0;
750}
751
752static u32 get_next_sd(u32 current)
753{
754 if (current != PPC4XX_LAST_SD)
755 return current + 1;
756 else
757 return 0;
758}
759
760u32 crypto4xx_build_pd(struct crypto_async_request *req,
761 struct crypto4xx_ctx *ctx,
762 struct scatterlist *src,
763 struct scatterlist *dst,
764 unsigned int datalen,
765 void *iv, u32 iv_len)
766{
767 struct crypto4xx_device *dev = ctx->dev;
768 dma_addr_t addr, pd_dma, sd_dma, gd_dma;
769 struct dynamic_sa_ctl *sa;
770 struct scatterlist *sg;
771 struct ce_gd *gd;
772 struct ce_pd *pd;
773 u32 num_gd, num_sd;
774 u32 fst_gd = 0xffffffff;
775 u32 fst_sd = 0xffffffff;
776 u32 pd_entry;
777 unsigned long flags;
778 struct pd_uinfo *pd_uinfo = NULL;
779 unsigned int nbytes = datalen, idx;
780 unsigned int ivlen = 0;
781 u32 gd_idx = 0;
782
 783	/* figure out how many gather descriptors (GDs) are needed */
 784	num_gd = sg_nents_for_len(src, datalen);
785 if ((int)num_gd < 0) {
786 dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
787 return -EINVAL;
788 }
789 if (num_gd == 1)
790 num_gd = 0;
791
 792	/* figure out how many scatter descriptors (SDs) are needed */
793 if (sg_is_last(dst) || ctx->is_hash) {
794 num_sd = 0;
795 } else {
796 if (datalen > PPC4XX_SD_BUFFER_SIZE) {
797 num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
798 if (datalen % PPC4XX_SD_BUFFER_SIZE)
799 num_sd++;
800 } else {
801 num_sd = 1;
802 }
803 }
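	/*
	 * For example, assuming PPC4XX_SD_BUFFER_SIZE is 2048, a 5000 byte
	 * request that cannot reuse dst directly needs three scatter
	 * descriptors (2048 + 2048 + 904 bytes).
	 */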
804
805 /*
 806	 * The following section of code must be protected by the lock:
 807	 * the gather and scatter descriptors have to be reserved as
 808	 * consecutive ring entries, and if we run out of any kind of
 809	 * descriptor, the ones already reserved must be put back.
810 */
811 spin_lock_irqsave(&dev->core_dev->lock, flags);
812 if (num_gd) {
813 fst_gd = crypto4xx_get_n_gd(dev, num_gd);
814 if (fst_gd == ERING_WAS_FULL) {
815 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
816 return -EAGAIN;
817 }
818 }
819 if (num_sd) {
820 fst_sd = crypto4xx_get_n_sd(dev, num_sd);
821 if (fst_sd == ERING_WAS_FULL) {
822 if (num_gd)
823 dev->gdr_head = fst_gd;
824 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
825 return -EAGAIN;
826 }
827 }
828 pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
829 if (pd_entry == ERING_WAS_FULL) {
830 if (num_gd)
831 dev->gdr_head = fst_gd;
832 if (num_sd)
833 dev->sdr_head = fst_sd;
834 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
835 return -EAGAIN;
836 }
837 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
838
839 pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
840 sizeof(struct pd_uinfo) * pd_entry);
841 pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
842 pd_uinfo->async_req = req;
843 pd_uinfo->num_gd = num_gd;
844 pd_uinfo->num_sd = num_sd;
845
846 if (iv_len || ctx->is_hash) {
847 ivlen = iv_len;
848 pd->sa = pd_uinfo->sa_pa;
849 sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
850 if (ctx->direction == DIR_INBOUND)
851 memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
852 else
853 memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
854
855 memcpy((void *) sa + ctx->offset_to_sr_ptr,
856 &pd_uinfo->sr_pa, 4);
857
858 if (iv_len)
859 crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
860 } else {
861 if (ctx->direction == DIR_INBOUND) {
862 pd->sa = ctx->sa_in_dma_addr;
863 sa = (struct dynamic_sa_ctl *) ctx->sa_in;
864 } else {
865 pd->sa = ctx->sa_out_dma_addr;
866 sa = (struct dynamic_sa_ctl *) ctx->sa_out;
867 }
868 }
869 pd->sa_len = ctx->sa_len;
870 if (num_gd) {
871 /* get first gd we are going to use */
872 gd_idx = fst_gd;
873 pd_uinfo->first_gd = fst_gd;
874 pd_uinfo->num_gd = num_gd;
875 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
876 pd->src = gd_dma;
877 /* enable gather */
878 sa->sa_command_0.bf.gather = 1;
879 idx = 0;
880 src = &src[0];
881 /* walk the sg, and setup gather array */
882 while (nbytes) {
883 sg = &src[idx];
884 addr = dma_map_page(dev->core_dev->device, sg_page(sg),
885 sg->offset, sg->length, DMA_TO_DEVICE);
886 gd->ptr = addr;
887 gd->ctl_len.len = sg->length;
888 gd->ctl_len.done = 0;
889 gd->ctl_len.ready = 1;
890 if (sg->length >= nbytes)
891 break;
892 nbytes -= sg->length;
893 gd_idx = get_next_gd(gd_idx);
894 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
895 idx++;
896 }
897 } else {
898 pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
899 src->offset, src->length, DMA_TO_DEVICE);
900 /*
901 * Disable gather in sa command
902 */
903 sa->sa_command_0.bf.gather = 0;
904 /*
905 * Indicate gather array is not used
906 */
907 pd_uinfo->first_gd = 0xffffffff;
908 pd_uinfo->num_gd = 0;
909 }
910 if (ctx->is_hash || sg_is_last(dst)) {
911 /*
 912	 * the application gave us dst as one contiguous piece of memory,
 913	 * so there is no need to use the scatter ring;
 914	 * in the is_hash case the ICV always sits at the end of the src data
915 */
916 pd_uinfo->using_sd = 0;
917 pd_uinfo->first_sd = 0xffffffff;
918 pd_uinfo->num_sd = 0;
919 pd_uinfo->dest_va = dst;
920 sa->sa_command_0.bf.scatter = 0;
921 if (ctx->is_hash)
922 pd->dest = virt_to_phys((void *)dst);
923 else
924 pd->dest = (u32)dma_map_page(dev->core_dev->device,
925 sg_page(dst), dst->offset,
926 dst->length, DMA_TO_DEVICE);
927 } else {
928 struct ce_sd *sd = NULL;
929 u32 sd_idx = fst_sd;
930 nbytes = datalen;
931 sa->sa_command_0.bf.scatter = 1;
932 pd_uinfo->using_sd = 1;
933 pd_uinfo->dest_va = dst;
934 pd_uinfo->first_sd = fst_sd;
935 pd_uinfo->num_sd = num_sd;
936 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
937 pd->dest = sd_dma;
938 /* setup scatter descriptor */
939 sd->ctl.done = 0;
940 sd->ctl.rdy = 1;
941 /* sd->ptr should be setup by sd_init routine*/
942 idx = 0;
943 if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
944 nbytes -= PPC4XX_SD_BUFFER_SIZE;
945 else
946 nbytes = 0;
947 while (nbytes) {
948 sd_idx = get_next_sd(sd_idx);
949 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
950 /* setup scatter descriptor */
951 sd->ctl.done = 0;
952 sd->ctl.rdy = 1;
953 if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
954 nbytes -= PPC4XX_SD_BUFFER_SIZE;
955 else
956 /*
 957	 * one SD entry can hold PPC4XX_SD_BUFFER_SIZE bytes,
 958	 * which is more than the remaining nbytes, so we are done
959 */
960 nbytes = 0;
961 }
962 }
963
964 sa->sa_command_1.bf.hash_crypto_offset = 0;
965 pd->pd_ctl.w = ctx->pd_ctl;
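	/*
	 * pd_ctl_len packs the bypass offset into bits 31-24 and the payload
	 * length into the low bits; 0x00400000 sets bit 22, presumably the
	 * host-ready/valid flag the engine checks before processing the PD.
	 */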
966 pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
967 pd_uinfo->state = PD_ENTRY_INUSE;
968 wmb();
969 /* write any value to push engine to read a pd */
970 writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
971 return -EINPROGRESS;
972}
973
974/**
975 * Algorithm Registration Functions
976 */
977static int crypto4xx_alg_init(struct crypto_tfm *tfm)
978{
979 struct crypto_alg *alg = tfm->__crt_alg;
980 struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
981 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
982
983 ctx->dev = amcc_alg->dev;
984 ctx->sa_in = NULL;
985 ctx->sa_out = NULL;
986 ctx->sa_in_dma_addr = 0;
987 ctx->sa_out_dma_addr = 0;
988 ctx->sa_len = 0;
989
990 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
991 default:
 992		tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
993 break;
994 case CRYPTO_ALG_TYPE_AHASH:
995 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
996 sizeof(struct crypto4xx_ctx));
997 break;
998 }
999
1000 return 0;
1001}
1002
1003static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
1004{
1005 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
1006
1007 crypto4xx_free_sa(ctx);
1008 crypto4xx_free_state_record(ctx);
1009}
1010
1011int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1012 struct crypto4xx_alg_common *crypto_alg,
1013 int array_size)
1014{
1015 struct crypto4xx_alg *alg;
1016 int i;
1017 int rc = 0;
1018
1019 for (i = 0; i < array_size; i++) {
1020 alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
1021 if (!alg)
1022 return -ENOMEM;
1023
1024 alg->alg = crypto_alg[i];
 1025		alg->dev = sec_dev;
1026
1027 switch (alg->alg.type) {
1028 case CRYPTO_ALG_TYPE_AHASH:
1029 rc = crypto_register_ahash(&alg->alg.u.hash);
1030 break;
1031
1032 default:
1033 rc = crypto_register_alg(&alg->alg.u.cipher);
1034 break;
1035 }
1036
 1037		if (rc) {
 1038			/* not yet added to alg_list, so just free it */
 1039			kfree(alg);
1040 } else {
1041 list_add_tail(&alg->entry, &sec_dev->alg_list);
1042 }
1043 }
1044
1045 return 0;
1046}
1047
1048static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
1049{
1050 struct crypto4xx_alg *alg, *tmp;
1051
1052 list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
1053 list_del(&alg->entry);
1054 switch (alg->alg.type) {
1055 case CRYPTO_ALG_TYPE_AHASH:
1056 crypto_unregister_ahash(&alg->alg.u.hash);
1057 break;
1058
1059 default:
1060 crypto_unregister_alg(&alg->alg.u.cipher);
1061 }
1062 kfree(alg);
1063 }
1064}
1065
1066static void crypto4xx_bh_tasklet_cb(unsigned long data)
1067{
1068 struct device *dev = (struct device *)data;
1069 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1070 struct pd_uinfo *pd_uinfo;
1071 struct ce_pd *pd;
1072 u32 tail;
1073
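	/*
	 * Reap completed packet descriptors from the tail of the ring: a PD
	 * is finished once the engine has set pe_done and cleared host_ready;
	 * stop at the first descriptor that is still in flight.
	 */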
1074 while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
1075 tail = core_dev->dev->pdr_tail;
1076 pd_uinfo = core_dev->dev->pdr_uinfo +
1077 sizeof(struct pd_uinfo)*tail;
1078 pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
1079 if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
1080 pd->pd_ctl.bf.pe_done &&
1081 !pd->pd_ctl.bf.host_ready) {
1082 pd->pd_ctl.bf.pe_done = 0;
1083 crypto4xx_pd_done(core_dev->dev, tail);
1084 crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
1085 pd_uinfo->state = PD_ENTRY_FREE;
1086 } else {
1087 /* if tail not done, break */
1088 break;
1089 }
1090 }
1091}
1092
1093/**
1094 * Top half of the ISR; the real work is deferred to the tasklet.
1095 */
1096static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
1097{
1098 struct device *dev = (struct device *)data;
1099 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1100
 1101	if (!core_dev->dev->ce_base)
1102 return 0;
1103
1104 writel(PPC4XX_INTERRUPT_CLR,
1105 core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
1106 tasklet_schedule(&core_dev->tasklet);
1107
1108 return IRQ_HANDLED;
1109}
1110
1111/**
1112 * Supported Crypto Algorithms
1113 */
1114struct crypto4xx_alg_common crypto4xx_alg[] = {
 1115	/* Crypto AES modes */
 1116	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
1117 .cra_name = "cbc(aes)",
1118 .cra_driver_name = "cbc-aes-ppc4xx",
1119 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1120 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1121 .cra_blocksize = AES_BLOCK_SIZE,
1122 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
 1123		.cra_type = &crypto_ablkcipher_type,
1124 .cra_init = crypto4xx_alg_init,
1125 .cra_exit = crypto4xx_alg_exit,
1126 .cra_module = THIS_MODULE,
1127 .cra_u = {
1128 .ablkcipher = {
1129 .min_keysize = AES_MIN_KEY_SIZE,
1130 .max_keysize = AES_MAX_KEY_SIZE,
1131 .ivsize = AES_IV_SIZE,
1132 .setkey = crypto4xx_setkey_aes_cbc,
1133 .encrypt = crypto4xx_encrypt,
1134 .decrypt = crypto4xx_decrypt,
1135 }
1136 }
 1137	}},
1138};
1139
1140/**
1141 * Module Initialization Routine
1142 */
1143static int crypto4xx_probe(struct platform_device *ofdev)
1144{
1145 int rc;
1146 struct resource res;
1147 struct device *dev = &ofdev->dev;
1148 struct crypto4xx_core_device *core_dev;
1149
 1150	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
1151 if (rc)
1152 return -ENODEV;
1153
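	/*
	 * Pulse the crypto engine reset bit in the SDR0 soft-reset register:
	 * set it, then clear it again.  The DCR offset and bit differ per
	 * SoC, hence the compatible-string checks below.
	 */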
1154 if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
1155 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1156 mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
1157 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1158 mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
1159 } else if (of_find_compatible_node(NULL, NULL,
1160 "amcc,ppc405ex-crypto")) {
1161 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1162 mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
1163 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1164 mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
1165 } else if (of_find_compatible_node(NULL, NULL,
1166 "amcc,ppc460sx-crypto")) {
1167 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1168 mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
1169 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1170 mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
1171 } else {
1172 printk(KERN_ERR "Crypto Function Not supported!\n");
1173 return -EINVAL;
1174 }
1175
1176 core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
1177 if (!core_dev)
1178 return -ENOMEM;
1179
1180 dev_set_drvdata(dev, core_dev);
1181 core_dev->ofdev = ofdev;
1182 core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
1183 if (!core_dev->dev)
1184 goto err_alloc_dev;
1185
1186 core_dev->dev->core_dev = core_dev;
1187 core_dev->device = dev;
1188 spin_lock_init(&core_dev->lock);
1189 INIT_LIST_HEAD(&core_dev->dev->alg_list);
1190 rc = crypto4xx_build_pdr(core_dev->dev);
1191 if (rc)
1192 goto err_build_pdr;
1193
1194 rc = crypto4xx_build_gdr(core_dev->dev);
1195 if (rc)
1196 goto err_build_gdr;
1197
1198 rc = crypto4xx_build_sdr(core_dev->dev);
1199 if (rc)
1200 goto err_build_sdr;
1201
1202 /* Init tasklet for bottom half processing */
1203 tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
1204 (unsigned long) dev);
1205
1206 /* Register for Crypto isr, Crypto Engine IRQ */
 1207	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1208 rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
1209 core_dev->dev->name, dev);
1210 if (rc)
1211 goto err_request_irq;
1212
 1213	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
1214 if (!core_dev->dev->ce_base) {
1215 dev_err(dev, "failed to of_iomap\n");
 1216		rc = -ENOMEM;
1217 goto err_iomap;
1218 }
1219
 1220	/* the PDR, RDR, GDR and SDR must be set up before this point */
1221 crypto4xx_hw_init(core_dev->dev);
1222
1223 /* Register security algorithms with Linux CryptoAPI */
1224 rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
1225 ARRAY_SIZE(crypto4xx_alg));
1226 if (rc)
1227 goto err_start_dev;
1228
 1229	ppc4xx_trng_probe(core_dev);
1230 return 0;
1231
1232err_start_dev:
1233 iounmap(core_dev->dev->ce_base);
1234err_iomap:
1235 free_irq(core_dev->irq, dev);
1236err_request_irq:
1237 irq_dispose_mapping(core_dev->irq);
1238 tasklet_kill(&core_dev->tasklet);
1239 crypto4xx_destroy_sdr(core_dev->dev);
1240err_build_sdr:
1241 crypto4xx_destroy_gdr(core_dev->dev);
1242err_build_gdr:
1243 crypto4xx_destroy_pdr(core_dev->dev);
1244err_build_pdr:
1245 kfree(core_dev->dev);
1246err_alloc_dev:
1247 kfree(core_dev);
1248
1249 return rc;
1250}
1251
1252static int crypto4xx_remove(struct platform_device *ofdev)
1253{
1254 struct device *dev = &ofdev->dev;
1255 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1256
1257 ppc4xx_trng_remove(core_dev);
1258
1259 free_irq(core_dev->irq, dev);
1260 irq_dispose_mapping(core_dev->irq);
1261
1262 tasklet_kill(&core_dev->tasklet);
1263 /* Un-register with Linux CryptoAPI */
1264 crypto4xx_unregister_alg(core_dev->dev);
1265 /* Free all allocated memory */
1266 crypto4xx_stop_all(core_dev);
1267
1268 return 0;
1269}
1270
1271static const struct of_device_id crypto4xx_match[] = {
1272 { .compatible = "amcc,ppc4xx-crypto",},
1273 { },
1274};
1275MODULE_DEVICE_TABLE(of, crypto4xx_match);
1276
1277static struct platform_driver crypto4xx_driver = {
 1278	.driver = {
 1279		.name = MODULE_NAME,
1280 .of_match_table = crypto4xx_match,
1281 },
 1282	.probe = crypto4xx_probe,
 1283	.remove = crypto4xx_remove,
1284};
1285
1286module_platform_driver(crypto4xx_driver);
1287
1288MODULE_LICENSE("GPL");
1289MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
1290MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");