/**
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file implements the AMCC crypto offload Linux device driver for use
 * with the Linux CryptoAPI.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/internal/hash.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"

#define PPC4XX_SEC_VERSION_STR		"0.5"

/**
 * PPC4xx Crypto Engine Initialization Routine
 */
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
	union ce_ring_size ring_size;
	union ce_ring_contol ring_ctrl;
	union ce_part_ring_size part_ring_size;
	union ce_io_threshold io_threshold;
	u32 rand_num;
	union ce_pe_dma_cfg pe_dma_cfg;

	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
	/*
	 * Setup PE DMA: first assert reset on SG, PDR and PE, then
	 * release the resets.
	 */
	pe_dma_cfg.w = 0;
	pe_dma_cfg.bf.bo_sgpd_en = 1;
	pe_dma_cfg.bf.bo_data_en = 0;
	pe_dma_cfg.bf.bo_sa_en = 1;
	pe_dma_cfg.bf.bo_pd_en = 1;
	pe_dma_cfg.bf.dynamic_sa_en = 1;
	pe_dma_cfg.bf.reset_sg = 1;
	pe_dma_cfg.bf.reset_pdr = 1;
	pe_dma_cfg.bf.reset_pe = 1;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* un-reset pe, sg and pdr */
	pe_dma_cfg.bf.pe_mode = 0;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
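	/*
	 * Note: the packet descriptor ring is also used as the result ring,
	 * so the RDR base below is pointed at the same DMA address as the
	 * PDR (completions are later polled out of the PDR by the tasklet).
	 */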
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
	writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
	ring_size.w = 0;
	ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
	ring_size.bf.ring_size = PPC4XX_NUM_PD;
	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
	ring_ctrl.w = 0;
	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
	writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
	part_ring_size.w = 0;
	part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
	part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
	writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
	writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
	io_threshold.w = 0;
	io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
	io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
	writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
	writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
	/* enable the packet engine; sg, pdr and pe stay out of reset */
	pe_dma_cfg.bf.pe_mode = 1;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* clear all pending interrupts */
	writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
	writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
}

int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
	ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
					&ctx->sa_in_dma_addr, GFP_ATOMIC);
	if (ctx->sa_in == NULL)
		return -ENOMEM;

	ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
					 &ctx->sa_out_dma_addr, GFP_ATOMIC);
	if (ctx->sa_out == NULL) {
		/* free with the size we allocated, not the stale sa_len */
		dma_free_coherent(ctx->dev->core_dev->device, size * 4,
				  ctx->sa_in, ctx->sa_in_dma_addr);
		return -ENOMEM;
	}

	memset(ctx->sa_in, 0, size * 4);
	memset(ctx->sa_out, 0, size * 4);
	ctx->sa_len = size;

	return 0;
}
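
/*
 * Usage sketch (illustrative, not from this file): a setkey handler is
 * expected to free any previous SA pair and allocate a new one sized in
 * 32-bit words, roughly as
 *
 *	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
 *		crypto4xx_free_sa(ctx);
 *	rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN);
 *	if (rc)
 *		return rc;
 *
 * SA_AES128_LEN is assumed here to be one of the SA length constants from
 * crypto4xx_sa.h; the exact name depends on that header.
 */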

void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
	if (ctx->sa_in != NULL)
		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
				  ctx->sa_in, ctx->sa_in_dma_addr);
	if (ctx->sa_out != NULL)
		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
				  ctx->sa_out, ctx->sa_out_dma_addr);

	ctx->sa_in_dma_addr = 0;
	ctx->sa_out_dma_addr = 0;
	ctx->sa_len = 0;
}

u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
{
	ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
				sizeof(struct sa_state_record),
				&ctx->state_record_dma_addr, GFP_ATOMIC);
	if (!ctx->state_record)
		return -ENOMEM;
	memset(ctx->state_record, 0, sizeof(struct sa_state_record));

	return 0;
}

void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
{
	if (ctx->state_record != NULL)
		dma_free_coherent(ctx->dev->core_dev->device,
				  sizeof(struct sa_state_record),
				  ctx->state_record,
				  ctx->state_record_dma_addr);
	ctx->state_record_dma_addr = 0;
}

/**
 * alloc memory for the packet descriptor ring
 * also allocs the per-descriptor bookkeeping (pd_uinfo) and the
 * shadow sa/state-record pools
 * pdr_head and pdr_tail index into this ring
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
	int i;
	struct pd_uinfo *pd_uinfo;
	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				      &dev->pdr_pa, GFP_ATOMIC);
	if (!dev->pdr)
		return -ENOMEM;

	dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
				 GFP_KERNEL);
	if (!dev->pdr_uinfo) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr,
				  dev->pdr_pa);
		return -ENOMEM;
	}
	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
						 256 * PPC4XX_NUM_PD,
						 &dev->shadow_sa_pool_pa,
						 GFP_ATOMIC);
	if (!dev->shadow_sa_pool)
		return -ENOMEM;

	dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
			sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			&dev->shadow_sr_pool_pa, GFP_ATOMIC);
	if (!dev->shadow_sr_pool)
		return -ENOMEM;
	for (i = 0; i < PPC4XX_NUM_PD; i++) {
		pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
						sizeof(struct pd_uinfo) * i);

		/* alloc 256 bytes which is enough for any kind of dynamic sa */
		pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
		pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;

		/* alloc state record */
		pd_uinfo->sr_va = dev->shadow_sr_pool +
		    sizeof(struct sa_state_record) * i;
		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
		    sizeof(struct sa_state_record) * i;
	}

	return 0;
}

static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
	if (dev->pdr != NULL)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);
	if (dev->shadow_sa_pool)
		dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
	if (dev->shadow_sr_pool)
		dma_free_coherent(dev->core_dev->device,
			sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

	kfree(dev->pdr_uinfo);
}

static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
	u32 retval;
	u32 tmp;

	retval = dev->pdr_head;
	tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

	if (tmp == dev->pdr_tail)
		return ERING_WAS_FULL;

	dev->pdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
	struct pd_uinfo *pd_uinfo;
	unsigned long flags;

	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
				       sizeof(struct pd_uinfo) * idx);
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->pdr_tail != PPC4XX_LAST_PD)
		dev->pdr_tail++;
	else
		dev->pdr_tail = 0;
	pd_uinfo->state = PD_ENTRY_FREE;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
				       dma_addr_t *pd_dma, u32 idx)
{
	*pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;

	return dev->pdr + sizeof(struct ce_pd) * idx;
}

/**
 * alloc memory for the gather descriptor ring
 * no need to alloc buf for the ring
 * gdr_head and gdr_tail index into this ring
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
	dev->gdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
				      &dev->gdr_pa, GFP_ATOMIC);
	if (!dev->gdr)
		return -ENOMEM;

	memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);

	return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
	dma_free_coherent(dev->core_dev->device,
			  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
			  dev->gdr, dev->gdr_pa);
}

/*
 * When this function is called, preemption or interrupts must be disabled.
 */
u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;
	if (n >= PPC4XX_NUM_GD)
		return ERING_WAS_FULL;

	retval = dev->gdr_head;
	tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
	if (dev->gdr_head > dev->gdr_tail) {
		if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->gdr_head < dev->gdr_tail) {
		if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	}
	dev->gdr_head = tmp;

	return retval;
}
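
/*
 * Worked example of the wrap check above (assuming, for illustration,
 * PPC4XX_NUM_GD == 256): with gdr_head == 250 and gdr_tail == 10,
 * requesting n == 8 gives tmp == (250 + 8) % 256 == 2; "tmp < head &&
 * tmp >= tail" is false, so the allocation succeeds and head becomes 2.
 * Requesting n == 20 would give tmp == 14, which has wrapped past the
 * tail, so ERING_WAS_FULL is returned and the ring is left untouched.
 */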

static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->gdr_tail == dev->gdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->gdr_tail != PPC4XX_LAST_GD)
		dev->gdr_tail++;
	else
		dev->gdr_tail = 0;

	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
					      dma_addr_t *gd_dma, u32 idx)
{
	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

	return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
}

/**
 * alloc memory for the scatter descriptor ring and its buffers
 * each sd entry gets a buffer of scatter_buffer_size bytes
 * sdr_head and sdr_tail index into this ring
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
	int i;
	struct ce_sd *sd_array;

	/* alloc memory for scatter descriptor ring */
	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				      &dev->sdr_pa, GFP_ATOMIC);
	if (!dev->sdr)
		return -ENOMEM;

	dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
	dev->scatter_buffer_va =
		dma_alloc_coherent(dev->core_dev->device,
			dev->scatter_buffer_size * PPC4XX_NUM_SD,
			&dev->scatter_buffer_pa, GFP_ATOMIC);
	if (!dev->scatter_buffer_va) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);
		return -ENOMEM;
	}

	sd_array = dev->sdr;

	for (i = 0; i < PPC4XX_NUM_SD; i++) {
		sd_array[i].ptr = dev->scatter_buffer_pa +
				  dev->scatter_buffer_size * i;
	}

	return 0;
}

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
	if (dev->sdr != NULL)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);

	if (dev->scatter_buffer_va != NULL)
		dma_free_coherent(dev->core_dev->device,
				  dev->scatter_buffer_size * PPC4XX_NUM_SD,
				  dev->scatter_buffer_va,
				  dev->scatter_buffer_pa);
}

/*
 * When this function is called, preemption or interrupts must be disabled.
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_SD)
		return ERING_WAS_FULL;

	retval = dev->sdr_head;
	tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
	if (dev->sdr_head > dev->sdr_tail) {
		if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->sdr_head < dev->sdr_tail) {
		if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} /* the head == tail (empty) case is already taken care of */
	dev->sdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->sdr_tail == dev->sdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}
	if (dev->sdr_tail != PPC4XX_LAST_SD)
		dev->sdr_tail++;
	else
		dev->sdr_tail = 0;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
					      dma_addr_t *sd_dma, u32 idx)
{
	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

	return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
}

static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
				   dma_addr_t *addr, u32 *length,
				   u32 *idx, u32 *offset, u32 *nbytes)
{
	u32 len;

	if (*length > dev->scatter_buffer_size) {
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset,
			dev->scatter_buffer_size);
		*offset = 0;
		*length -= dev->scatter_buffer_size;
		*nbytes -= dev->scatter_buffer_size;
		if (*idx == PPC4XX_LAST_SD)
			*idx = 0;
		else
			(*idx)++;
		*addr = *addr + dev->scatter_buffer_size;
		return 1;
	} else if (*length < dev->scatter_buffer_size) {
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset, *length);
		if ((*offset + *length) == dev->scatter_buffer_size) {
			if (*idx == PPC4XX_LAST_SD)
				*idx = 0;
			else
				(*idx)++;
			*nbytes -= *length;
			*offset = 0;
		} else {
			*nbytes -= *length;
			*offset += *length;
		}

		return 0;
	} else {
		len = (*nbytes <= dev->scatter_buffer_size) ?
				(*nbytes) : dev->scatter_buffer_size;
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset,
			len);
		*offset = 0;
		*nbytes -= len;

		if (*idx == PPC4XX_LAST_SD)
			*idx = 0;
		else
			(*idx)++;

		return 0;
	}
}
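
/*
 * Worked example (illustrative, assuming scatter_buffer_size == 4096):
 * to drain 6000 bytes into one destination segment, the first call hits
 * the *length > scatter_buffer_size case, copies 4096 bytes, advances
 * *idx and *addr and returns 1 (more to do); the caller loops, and the
 * second call hits the *length < scatter_buffer_size case, copies the
 * remaining 1904 bytes, adjusts *offset and *nbytes and returns 0 (this
 * destination segment is done).
 */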

static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
				      struct ce_pd *pd,
				      struct pd_uinfo *pd_uinfo,
				      u32 nbytes,
				      struct scatterlist *dst)
{
	dma_addr_t addr;
	u32 this_sd;
	u32 offset;
	u32 len;
	u32 i;
	u32 sg_len;
	struct scatterlist *sg;

	this_sd = pd_uinfo->first_sd;
	offset = 0;
	i = 0;

	while (nbytes) {
		sg = &dst[i];
		sg_len = sg->length;
		addr = dma_map_page(dev->core_dev->device, sg_page(sg),
				sg->offset, sg->length, DMA_TO_DEVICE);

		if (offset == 0) {
			len = (nbytes <= sg->length) ? nbytes : sg->length;
			while (crypto4xx_fill_one_page(dev, &addr, &len,
				&this_sd, &offset, &nbytes))
				;
			if (!nbytes)
				return;
			i++;
		} else {
			len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
				nbytes : (dev->scatter_buffer_size - offset);
			len = (sg->length < len) ? sg->length : len;
			while (crypto4xx_fill_one_page(dev, &addr, &len,
					       &this_sd, &offset, &nbytes))
				;
			if (!nbytes)
				return;
			sg_len -= len;
			if (sg_len) {
				addr += len;
				while (crypto4xx_fill_one_page(dev, &addr,
					&sg_len, &this_sd, &offset, &nbytes))
					;
			}
			i++;
		}
	}
}

static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
					struct crypto4xx_ctx *ctx)
{
	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
	struct sa_state_record *state_record =
		(struct sa_state_record *) pd_uinfo->sr_va;

	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
		memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
	}

	return 0;
}

static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo)
{
	int i;
	if (pd_uinfo->num_gd) {
		for (i = 0; i < pd_uinfo->num_gd; i++)
			crypto4xx_put_gd_to_gdr(dev);
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (pd_uinfo->num_sd) {
		for (i = 0; i < pd_uinfo->num_sd; i++)
			crypto4xx_put_sd_to_sdr(dev);

		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
	}
}

static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
				     struct pd_uinfo *pd_uinfo,
				     struct ce_pd *pd)
{
	struct crypto4xx_ctx *ctx;
	struct ablkcipher_request *ablk_req;
	struct scatterlist *dst;
	dma_addr_t addr;

	ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ablk_req->base.tfm);

	if (pd_uinfo->using_sd) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
					  ablk_req->dst);
	} else {
		dst = pd_uinfo->dest_va;
		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
				    dst->offset, dst->length, DMA_FROM_DEVICE);
	}
	crypto4xx_ret_sg_desc(dev, pd_uinfo);
	if (ablk_req->base.complete != NULL)
		ablk_req->base.complete(&ablk_req->base, 0);

	return 0;
}

static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
				struct pd_uinfo *pd_uinfo)
{
	struct crypto4xx_ctx *ctx;
	struct ahash_request *ahash_req;

	ahash_req = ahash_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ahash_req->base.tfm);

	crypto4xx_copy_digest_to_dst(pd_uinfo,
				     crypto_tfm_ctx(ahash_req->base.tfm));
	crypto4xx_ret_sg_desc(dev, pd_uinfo);
	/* call user provided callback function */
	if (ahash_req->base.complete != NULL)
		ahash_req->base.complete(&ahash_req->base, 0);

	return 0;
}

static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
	struct ce_pd *pd;
	struct pd_uinfo *pd_uinfo;

	pd = dev->pdr + sizeof(struct ce_pd)*idx;
	pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
	if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
			CRYPTO_ALG_TYPE_ABLKCIPHER)
		return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
	else
		return crypto4xx_ahash_done(dev, pd_uinfo);
}

/**
 * Note: Only use this function to copy items that are word aligned.
 */
void crypto4xx_memcpy_le(unsigned int *dst,
			 const unsigned char *buf,
			 int len)
{
	u8 *tmp;
	for (; len >= 4; buf += 4, len -= 4)
		*dst++ = cpu_to_le32(*(unsigned int *) buf);

	tmp = (u8 *)dst;
	switch (len) {
	case 3:
		*tmp++ = 0;
		*tmp++ = *(buf+2);
		*tmp++ = *(buf+1);
		*tmp++ = *buf;
		break;
	case 2:
		*tmp++ = 0;
		*tmp++ = 0;
		*tmp++ = *(buf+1);
		*tmp++ = *buf;
		break;
	case 1:
		*tmp++ = 0;
		*tmp++ = 0;
		*tmp++ = 0;
		*tmp++ = *buf;
		break;
	default:
		break;
	}
}
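
/*
 * Example (illustrative): on this big-endian core, copying the 6-byte
 * buffer { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } stores one byte-swapped
 * word (memory bytes 04 03 02 01) followed by the padded tail word
 * (memory bytes 00 00 06 05), i.e. the trailing partial word is
 * zero-filled from its most significant bytes.
 */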

static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
	crypto4xx_destroy_pdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_sdr(core_dev->dev);
	dev_set_drvdata(core_dev->device, NULL);
	iounmap(core_dev->dev->ce_base);
	kfree(core_dev->dev);
	kfree(core_dev);
}

void crypto4xx_return_pd(struct crypto4xx_device *dev,
			 u32 pd_entry, struct ce_pd *pd,
			 struct pd_uinfo *pd_uinfo)
{
	/* irq should be already disabled */
	dev->pdr_head = pd_entry;
	pd->pd_ctl.w = 0;
	pd->pd_ctl_len.w = 0;
	pd_uinfo->state = PD_ENTRY_FREE;
}

/*
 * Derive the number of elements in a scatterlist.
 * Shamelessly copied from talitos.c.
 */
static int get_sg_count(struct scatterlist *sg_list, int nbytes)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes) {
		sg_nents++;
		if (sg->length > nbytes)
			break;
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return sg_nents;
}
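
/*
 * Example (illustrative): for a scatterlist of 512 + 512 + 4096 byte
 * entries and nbytes == 1500, this walks two full entries
 * (512 + 512 == 1024) and stops inside the third, returning 3.
 */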

static u32 get_next_gd(u32 cur)
{
	if (cur != PPC4XX_LAST_GD)
		return cur + 1;
	else
		return 0;
}

static u32 get_next_sd(u32 cur)
{
	if (cur != PPC4XX_LAST_SD)
		return cur + 1;
	else
		return 0;
}

u32 crypto4xx_build_pd(struct crypto_async_request *req,
		       struct crypto4xx_ctx *ctx,
		       struct scatterlist *src,
		       struct scatterlist *dst,
		       unsigned int datalen,
		       void *iv, u32 iv_len)
{
	struct crypto4xx_device *dev = ctx->dev;
	dma_addr_t addr, pd_dma, sd_dma, gd_dma;
	struct dynamic_sa_ctl *sa;
	struct scatterlist *sg;
	struct ce_gd *gd;
	struct ce_pd *pd;
	u32 num_gd, num_sd;
	u32 fst_gd = 0xffffffff;
	u32 fst_sd = 0xffffffff;
	u32 pd_entry;
	unsigned long flags;
	struct pd_uinfo *pd_uinfo = NULL;
	unsigned int nbytes = datalen, idx;
	unsigned int ivlen = 0;
	u32 gd_idx = 0;

	/* figure out how many gd are needed */
	num_gd = get_sg_count(src, datalen);
	if (num_gd == 1)
		num_gd = 0;

	/* figure out how many sd are needed */
	if (sg_is_last(dst) || ctx->is_hash) {
		num_sd = 0;
	} else {
		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
			num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
			if (datalen % PPC4XX_SD_BUFFER_SIZE)
				num_sd++;
		} else {
			num_sd = 1;
		}
	}

	/*
	 * The following section of code needs to be protected by the ring
	 * lock: the gather and scatter ring allocations must be consecutive,
	 * and if we run out of any kind of descriptor, the descriptors
	 * already acquired must be returned to their original place.
	 */
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (num_gd) {
		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
		if (fst_gd == ERING_WAS_FULL) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	if (num_sd) {
		fst_sd = crypto4xx_get_n_sd(dev, num_sd);
		if (fst_sd == ERING_WAS_FULL) {
			if (num_gd)
				dev->gdr_head = fst_gd;
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
	if (pd_entry == ERING_WAS_FULL) {
		if (num_gd)
			dev->gdr_head = fst_gd;
		if (num_sd)
			dev->sdr_head = fst_sd;
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
				       sizeof(struct pd_uinfo) * pd_entry);
	pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
	pd_uinfo->async_req = req;
	pd_uinfo->num_gd = num_gd;
	pd_uinfo->num_sd = num_sd;

	if (iv_len || ctx->is_hash) {
		ivlen = iv_len;
		pd->sa = pd_uinfo->sa_pa;
		sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
		if (ctx->direction == DIR_INBOUND)
			memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
		else
			memcpy(sa, ctx->sa_out, ctx->sa_len * 4);

		memcpy((void *) sa + ctx->offset_to_sr_ptr,
			&pd_uinfo->sr_pa, 4);

		if (iv_len)
			crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
	} else {
		if (ctx->direction == DIR_INBOUND) {
			pd->sa = ctx->sa_in_dma_addr;
			sa = (struct dynamic_sa_ctl *) ctx->sa_in;
		} else {
			pd->sa = ctx->sa_out_dma_addr;
			sa = (struct dynamic_sa_ctl *) ctx->sa_out;
		}
	}
	pd->sa_len = ctx->sa_len;
	if (num_gd) {
		/* get first gd we are going to use */
		gd_idx = fst_gd;
		pd_uinfo->first_gd = fst_gd;
		pd_uinfo->num_gd = num_gd;
		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
		pd->src = gd_dma;
		/* enable gather */
		sa->sa_command_0.bf.gather = 1;
		idx = 0;
		src = &src[0];
		/* walk the sg, and setup gather array */
		while (nbytes) {
			sg = &src[idx];
			addr = dma_map_page(dev->core_dev->device, sg_page(sg),
				    sg->offset, sg->length, DMA_TO_DEVICE);
			gd->ptr = addr;
			gd->ctl_len.len = sg->length;
			gd->ctl_len.done = 0;
			gd->ctl_len.ready = 1;
			if (sg->length >= nbytes)
				break;
			nbytes -= sg->length;
			gd_idx = get_next_gd(gd_idx);
			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
			idx++;
		}
	} else {
		pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
				src->offset, src->length, DMA_TO_DEVICE);
		/*
		 * Disable gather in sa command
		 */
		sa->sa_command_0.bf.gather = 0;
		/*
		 * Indicate gather array is not used
		 */
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (ctx->is_hash || sg_is_last(dst)) {
		/*
		 * The application gave us dst as a single contiguous piece
		 * of memory, so there is no need to use the scatter ring.
		 * In the is_hash case, the icv is always at the end of the
		 * src data.
		 */
		pd_uinfo->using_sd = 0;
		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
		pd_uinfo->dest_va = dst;
		sa->sa_command_0.bf.scatter = 0;
		if (ctx->is_hash)
			pd->dest = virt_to_phys((void *)dst);
		else
			pd->dest = (u32)dma_map_page(dev->core_dev->device,
					sg_page(dst), dst->offset,
					dst->length, DMA_TO_DEVICE);
	} else {
		struct ce_sd *sd = NULL;
		u32 sd_idx = fst_sd;
		nbytes = datalen;
		sa->sa_command_0.bf.scatter = 1;
		pd_uinfo->using_sd = 1;
		pd_uinfo->dest_va = dst;
		pd_uinfo->first_sd = fst_sd;
		pd_uinfo->num_sd = num_sd;
		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
		pd->dest = sd_dma;
		/* setup scatter descriptor */
		sd->ctl.done = 0;
		sd->ctl.rdy = 1;
		/* sd->ptr was already set up by crypto4xx_build_sdr */
		idx = 0;
		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
			nbytes -= PPC4XX_SD_BUFFER_SIZE;
		else
			nbytes = 0;
		while (nbytes) {
			sd_idx = get_next_sd(sd_idx);
			sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
			/* setup scatter descriptor */
			sd->ctl.done = 0;
			sd->ctl.rdy = 1;
			if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
				nbytes -= PPC4XX_SD_BUFFER_SIZE;
			else
				/*
				 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
				 * which is more than nbytes, so done.
				 */
				nbytes = 0;
		}
	}

	sa->sa_command_1.bf.hash_crypto_offset = 0;
	pd->pd_ctl.w = ctx->pd_ctl;
	pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
	pd_uinfo->state = PD_ENTRY_INUSE;
	wmb();
	/* write any value to push engine to read a pd */
	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	return -EINPROGRESS;
}
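
/*
 * Usage sketch (illustrative, not from this file): the cipher entry
 * points in crypto4xx_alg.c are expected to funnel into this builder
 * roughly as
 *
 *	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
 *				  req->nbytes, req->info, iv_len);
 *
 * where iv_len is the IV size of the selected SA; hash requests pass
 * their own src/dst and an iv_len of 0.
 */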

/**
 * Algorithm Registration Functions
 */
static int crypto4xx_alg_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = amcc_alg->dev;
	ctx->sa_in = NULL;
	ctx->sa_out = NULL;
	ctx->sa_in_dma_addr = 0;
	ctx->sa_out_dma_addr = 0;
	ctx->sa_len = 0;

	if (alg->cra_type == &crypto_ablkcipher_type)
		tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
	else if (alg->cra_type == &crypto_ahash_type)
		tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);

	return 0;
}

static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
{
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto4xx_free_sa(ctx);
	crypto4xx_free_state_record(ctx);
}

int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
			   struct crypto_alg *crypto_alg, int array_size)
{
	struct crypto4xx_alg *alg;
	int i;
	int rc = 0;

	for (i = 0; i < array_size; i++) {
		alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
		if (!alg)
			return -ENOMEM;

		alg->alg = crypto_alg[i];
		INIT_LIST_HEAD(&alg->alg.cra_list);
		if (alg->alg.cra_init == NULL)
			alg->alg.cra_init = crypto4xx_alg_init;
		if (alg->alg.cra_exit == NULL)
			alg->alg.cra_exit = crypto4xx_alg_exit;
		alg->dev = sec_dev;
		rc = crypto_register_alg(&alg->alg);
		if (rc)
			/* never added to alg_list, so just free it */
			kfree(alg);
		else
			list_add_tail(&alg->entry, &sec_dev->alg_list);
	}

	return 0;
}

static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
	struct crypto4xx_alg *alg, *tmp;

	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
		list_del(&alg->entry);
		crypto_unregister_alg(&alg->alg);
		kfree(alg);
	}
}

static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
	struct pd_uinfo *pd_uinfo;
	struct ce_pd *pd;
	u32 tail;

	while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
		tail = core_dev->dev->pdr_tail;
		pd_uinfo = core_dev->dev->pdr_uinfo +
			sizeof(struct pd_uinfo)*tail;
		pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
		if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
				   pd->pd_ctl.bf.pe_done &&
				   !pd->pd_ctl.bf.host_ready) {
			pd->pd_ctl.bf.pe_done = 0;
			crypto4xx_pd_done(core_dev->dev, tail);
			crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
			pd_uinfo->state = PD_ENTRY_FREE;
		} else {
			/* if tail not done, break */
			break;
		}
	}
}
/**
 * Top half of the ISR.
 */
static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	if (!core_dev->dev->ce_base)
		return IRQ_NONE;

	writel(PPC4XX_INTERRUPT_CLR,
	       core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
	tasklet_schedule(&core_dev->tasklet);

	return IRQ_HANDLED;
}

/**
 * Supported Crypto Algorithms
 */
struct crypto_alg crypto4xx_alg[] = {
	/* Crypto AES modes */
	{
		.cra_name	= "cbc(aes)",
		.cra_driver_name = "cbc-aes-ppc4xx",
		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_ablkcipher_type,
		.cra_module	= THIS_MODULE,
		.cra_u		= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_IV_SIZE,
				.setkey		= crypto4xx_setkey_aes_cbc,
				.encrypt	= crypto4xx_encrypt,
				.decrypt	= crypto4xx_decrypt,
			}
		}
	},
	/* Hash SHA1 */
	{
		.cra_name	= "sha1",
		.cra_driver_name = "sha1-ppc4xx",
		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
		.cra_flags	= CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
		.cra_blocksize	= SHA1_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_ahash_type,
		.cra_init	= crypto4xx_sha1_alg_init,
		.cra_module	= THIS_MODULE,
		.cra_u		= {
			.ahash = {
				.digestsize	= SHA1_DIGEST_SIZE,
				.init		= crypto4xx_hash_init,
				.update		= crypto4xx_hash_update,
				.final		= crypto4xx_hash_final,
				.digest		= crypto4xx_hash_digest,
			}
		}
	},
};

/**
 * Module Initialization Routine: device probe
 */
static int __init crypto4xx_probe(struct of_device *ofdev,
				  const struct of_device_id *match)
{
	int rc;
	struct resource res;
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev;

	rc = of_address_to_resource(ofdev->node, 0, &res);
	if (rc)
		return -ENODEV;

	if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc405ex-crypto")) {
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc460sx-crypto")) {
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
	} else {
		printk(KERN_ERR "Crypto Function Not supported!\n");
		return -EINVAL;
	}

	core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
	if (!core_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, core_dev);
	core_dev->ofdev = ofdev;
	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
	if (!core_dev->dev) {
		rc = -ENOMEM;
		goto err_alloc_dev;
	}

	core_dev->dev->core_dev = core_dev;
	core_dev->device = dev;
	spin_lock_init(&core_dev->lock);
	INIT_LIST_HEAD(&core_dev->dev->alg_list);
	rc = crypto4xx_build_pdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_gdr(core_dev->dev);
	if (rc)
		goto err_build_gdr;

	rc = crypto4xx_build_sdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	/* Init tasklet for bottom half processing */
	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
		     (unsigned long) dev);

	/* Register for Crypto isr, Crypto Engine IRQ */
	core_dev->irq = irq_of_parse_and_map(ofdev->node, 0);
	rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
			 core_dev->dev->name, dev);
	if (rc)
		goto err_request_irq;

	core_dev->dev->ce_base = of_iomap(ofdev->node, 0);
	if (!core_dev->dev->ce_base) {
		dev_err(dev, "failed to of_iomap\n");
		rc = -ENOMEM;
		goto err_iomap;
	}

	/* need to setup pdr, rdr, gdr and sdr before this */
	crypto4xx_hw_init(core_dev->dev);

	/* Register security algorithms with Linux CryptoAPI */
	rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
				    ARRAY_SIZE(crypto4xx_alg));
	if (rc)
		goto err_start_dev;

	return 0;

err_start_dev:
	iounmap(core_dev->dev->ce_base);
err_iomap:
	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);
	tasklet_kill(&core_dev->tasklet);
err_request_irq:
	crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
	crypto4xx_destroy_gdr(core_dev->dev);
err_build_gdr:
	crypto4xx_destroy_pdr(core_dev->dev);
err_build_pdr:
	kfree(core_dev->dev);
err_alloc_dev:
	kfree(core_dev);

	return rc;
}

static int __exit crypto4xx_remove(struct of_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);

	tasklet_kill(&core_dev->tasklet);
	/* Un-register with Linux CryptoAPI */
	crypto4xx_unregister_alg(core_dev->dev);
	/* Free all allocated memory */
	crypto4xx_stop_all(core_dev);

	return 0;
}

static struct of_device_id crypto4xx_match[] = {
	{ .compatible = "amcc,ppc4xx-crypto",},
	{ },
};

static struct of_platform_driver crypto4xx_driver = {
	.name		= "crypto4xx",
	.match_table	= crypto4xx_match,
	.probe		= crypto4xx_probe,
	.remove		= crypto4xx_remove,
};

static int __init crypto4xx_init(void)
{
	return of_register_platform_driver(&crypto4xx_driver);
}

static void __exit crypto4xx_exit(void)
{
	of_unregister_platform_driver(&crypto4xx_driver);
}

module_init(crypto4xx_init);
module_exit(crypto4xx_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");