/*
 * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtnetlink.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/timer.h>

#include "picoxcell_crypto_regs.h"

/*
 * The threshold for the number of entries in the CMD FIFO available before
 * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
 * number of interrupts raised to the CPU.
 */
#define CMD0_IRQ_THRESHOLD	1

/*
 * The timeout period (in jiffies) for a PDU. When the number of PDUs in
 * flight is greater than the STAT_IRQ_THRESHOLD, or is 0, the timer is
 * disabled. When there are packets in flight but fewer than the threshold,
 * we enable the timer and, at expiry, attempt to remove any processed
 * packets from the queue; if there are still packets left, we schedule the
 * timer again.
 */
#define PACKET_TIMEOUT		1
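/*
 * PACKET_TIMEOUT is measured in jiffies, so a value of 1 simply defers the
 * flush to the next timer tick (roughly 1-10 ms for typical HZ settings).
 */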

/* The priority to register each algorithm with. */
#define SPACC_CRYPTO_ALG_PRIORITY	10000

#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN	16
#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ	64
#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ	64
#define SPACC_CRYPTO_IPSEC_MAX_CTXS	32
#define SPACC_CRYPTO_IPSEC_FIFO_SZ	32
#define SPACC_CRYPTO_L2_CIPHER_PG_SZ	64
#define SPACC_CRYPTO_L2_HASH_PG_SZ	64
#define SPACC_CRYPTO_L2_MAX_CTXS	128
#define SPACC_CRYPTO_L2_FIFO_SZ		128

#define MAX_DDT_LEN			16

/* DDT format. This must match the hardware DDT format exactly. */
struct spacc_ddt {
	dma_addr_t	p;
	u32		len;
};
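/*
 * The engine consumes a DDT as a flat array of (address, length) pairs and
 * stops at the first zero entry, so every list built below is terminated
 * with a ddt_set(..., 0, 0) sentinel.
 */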

/*
 * Asynchronous crypto request structure.
 *
 * This structure defines a request that is either queued for processing or
 * being processed.
 */
struct spacc_req {
	struct list_head		list;
	struct spacc_engine		*engine;
	struct crypto_async_request	*req;
	int				result;
	bool				is_encrypt;
	unsigned			ctx_id;
	dma_addr_t			src_addr, dst_addr;
	struct spacc_ddt		*src_ddt, *dst_ddt;
	void				(*complete)(struct spacc_req *req);

	/* AEAD specific bits. */
	u8				*giv;
	size_t				giv_len;
	dma_addr_t			giv_pa;
};

struct spacc_engine {
	void __iomem		*regs;
	struct list_head	pending;
	int			next_ctx;
	spinlock_t		hw_lock;
	int			in_flight;
	struct list_head	completed;
	struct list_head	in_progress;
	struct tasklet_struct	complete;
	unsigned long		fifo_sz;
	void __iomem		*cipher_ctx_base;
	void __iomem		*hash_key_base;
	struct spacc_alg	*algs;
	unsigned		num_algs;
	struct list_head	registered_algs;
	size_t			cipher_pg_sz;
	size_t			hash_pg_sz;
	const char		*name;
	struct clk		*clk;
	struct device		*dev;
	unsigned		max_ctxs;
	struct timer_list	packet_timeout;
	unsigned		stat_irq_thresh;
	struct dma_pool		*req_pool;
};

/* Algorithm type mask. */
#define SPACC_CRYPTO_ALG_MASK		0x7

/* SPACC definition of a crypto algorithm. */
struct spacc_alg {
	unsigned long		ctrl_default;
	unsigned long		type;
	struct crypto_alg	alg;
	struct spacc_engine	*engine;
	struct list_head	entry;
	int			key_offs;
	int			iv_offs;
};

/* Generic context structure for any algorithm type. */
struct spacc_generic_ctx {
	struct spacc_engine	*engine;
	int			flags;
	int			key_offs;
	int			iv_offs;
};

/* Block cipher context. */
struct spacc_ablk_ctx {
	struct spacc_generic_ctx	generic;
	u8				key[AES_MAX_KEY_SIZE];
	u8				key_len;
	/*
	 * The fallback cipher. If the operation can't be done in hardware,
	 * fall back to a software version.
	 */
	struct crypto_ablkcipher	*sw_cipher;
};

/* AEAD cipher context. */
struct spacc_aead_ctx {
	struct spacc_generic_ctx	generic;
	u8				cipher_key[AES_MAX_KEY_SIZE];
	u8				hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
	u8				cipher_key_len;
	u8				hash_key_len;
	struct crypto_aead		*sw_cipher;
	size_t				auth_size;
	u8				salt[AES_BLOCK_SIZE];
};

static int spacc_ablk_submit(struct spacc_req *req);

static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
{
	return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
}

static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
{
	u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);

	return fifo_stat & SPA_FIFO_CMD_FULL;
}

/*
 * Given a cipher context, and a context number, get the base address of the
 * context page.
 *
 * Returns the address of the context page where the key/context may
 * be written.
 */
static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
						unsigned indx,
						bool is_cipher_ctx)
{
	return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
			(indx * ctx->engine->cipher_pg_sz) :
		ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
}

/* The context pages can only be written with 32-bit accesses. */
static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
				 unsigned count)
{
	const u32 *src32 = (const u32 *) src;

	while (count--)
		writel(*src32++, dst++);
}

static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
				   void __iomem *page_addr, const u8 *key,
				   size_t key_len, const u8 *iv, size_t iv_len)
{
	void __iomem *key_ptr = page_addr + ctx->key_offs;
	void __iomem *iv_ptr = page_addr + ctx->iv_offs;

	memcpy_toio32(key_ptr, key, key_len / 4);
	memcpy_toio32(iv_ptr, iv, iv_len / 4);
}
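/*
 * Note that memcpy_toio32() takes its count in 32-bit words, hence the
 * key_len / 4 and iv_len / 4 above. This assumes key and IV lengths that
 * are multiples of four bytes, which holds for the key and block sizes
 * this driver supports.
 */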

/*
 * Load a context into the engine's context memory.
 *
 * Returns the index of the context page where the context was loaded.
 */
static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
			       const u8 *ciph_key, size_t ciph_len,
			       const u8 *iv, size_t ivlen, const u8 *hash_key,
			       size_t hash_len)
{
	unsigned indx = ctx->engine->next_ctx++;
	void __iomem *ciph_page_addr, *hash_page_addr;

	ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
	hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);

	ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
	spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
			       ivlen);
	writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
	       (1 << SPA_KEY_SZ_CIPHER_OFFSET),
	       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);

	if (hash_key) {
		memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
		writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
		       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
	}

	return indx;
}
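/*
 * Context pages are handed out round-robin: next_ctx wraps using a
 * power-of-two mask (fifo_sz - 1), which relies on the FIFO sizes (32 for
 * the IPSEC engine, 128 for the L2 engine) being powers of two.
 */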

/* Count the number of scatterlist entries in a scatterlist. */
static inline int sg_count(struct scatterlist *sg_list, int nbytes)
{
	return sg_nents_for_len(sg_list, nbytes);
}

static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
{
	ddt->p = phys;
	ddt->len = len;
}

/*
 * Take a crypto request and scatterlists for the data and turn them into DDTs
 * for passing to the crypto engines. This also DMA maps the data so that the
 * crypto engines can DMA to/from them.
 */
static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
					 struct scatterlist *payload,
					 unsigned nbytes,
					 enum dma_data_direction dir,
					 dma_addr_t *ddt_phys)
{
	unsigned nents, mapped_ents;
	struct scatterlist *cur;
	struct spacc_ddt *ddt;
	int i;

	nents = sg_count(payload, nbytes);
	mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);

	if (mapped_ents + 1 > MAX_DDT_LEN)
		goto out;

	ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
	if (!ddt)
		goto out;

	for_each_sg(payload, cur, mapped_ents, i)
		ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
	ddt_set(&ddt[mapped_ents], 0, 0);

	return ddt;

out:
	dma_unmap_sg(engine->dev, payload, nents, dir);
	return NULL;
}
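/*
 * The mapped_ents + 1 check above reserves room for the terminating zero
 * entry, so at most MAX_DDT_LEN - 1 data segments can be described by a
 * DDT list allocated from req_pool.
 */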

static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
{
	struct aead_request *areq = container_of(req->req, struct aead_request,
						 base);
	struct spacc_engine *engine = req->engine;
	struct spacc_ddt *src_ddt, *dst_ddt;
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
	unsigned nents = sg_count(areq->src, areq->cryptlen);
	unsigned total;
	dma_addr_t iv_addr;
	struct scatterlist *cur;
	int i, dst_ents, src_ents, assoc_ents;
	u8 *iv = giv ? giv : areq->iv;

	src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
	if (!src_ddt)
		return -ENOMEM;

	dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
	if (!dst_ddt) {
		dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
		return -ENOMEM;
	}

	req->src_ddt = src_ddt;
	req->dst_ddt = dst_ddt;

	assoc_ents = dma_map_sg(engine->dev, areq->assoc,
		sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
	if (areq->src != areq->dst) {
		src_ents = dma_map_sg(engine->dev, areq->src, nents,
				      DMA_TO_DEVICE);
		dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
				      DMA_FROM_DEVICE);
	} else {
		src_ents = dma_map_sg(engine->dev, areq->src, nents,
				      DMA_BIDIRECTIONAL);
		dst_ents = 0;
	}

	/*
	 * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
	 * formed by the crypto block and sent as the ESP IV for IPSEC.
	 */
	iv_addr = dma_map_single(engine->dev, iv, ivsize,
				 giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	req->giv_pa = iv_addr;

	/*
	 * Map the associated data. For decryption we don't copy the
	 * associated data.
	 */
	total = areq->assoclen;
	for_each_sg(areq->assoc, cur, assoc_ents, i) {
		unsigned len = sg_dma_len(cur);

		if (len > total)
			len = total;

		total -= len;

		ddt_set(src_ddt++, sg_dma_address(cur), len);
		if (req->is_encrypt)
			ddt_set(dst_ddt++, sg_dma_address(cur), len);
	}
	ddt_set(src_ddt++, iv_addr, ivsize);

	if (giv || req->is_encrypt)
		ddt_set(dst_ddt++, iv_addr, ivsize);

	/*
	 * Now map in the payload for the source and destination and terminate
	 * with the NULL pointers.
	 */
	for_each_sg(areq->src, cur, src_ents, i) {
		ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
		if (areq->src == areq->dst)
			ddt_set(dst_ddt++, sg_dma_address(cur),
				sg_dma_len(cur));
	}

	for_each_sg(areq->dst, cur, dst_ents, i)
		ddt_set(dst_ddt++, sg_dma_address(cur),
			sg_dma_len(cur));

	ddt_set(src_ddt, 0, 0);
	ddt_set(dst_ddt, 0, 0);

	return 0;
}

static void spacc_aead_free_ddts(struct spacc_req *req)
{
	struct aead_request *areq = container_of(req->req, struct aead_request,
						 base);
	struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
	struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
	struct spacc_engine *engine = aead_ctx->generic.engine;
	unsigned ivsize = alg->alg.cra_aead.ivsize;
	unsigned nents = sg_count(areq->src, areq->cryptlen);

	if (areq->src != areq->dst) {
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
		dma_unmap_sg(engine->dev, areq->dst,
			     sg_count(areq->dst, areq->cryptlen),
			     DMA_FROM_DEVICE);
	} else
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);

	dma_unmap_sg(engine->dev, areq->assoc,
		     sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);

	dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);

	dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
	dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
}

static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
			   dma_addr_t ddt_addr, struct scatterlist *payload,
			   unsigned nbytes, enum dma_data_direction dir)
{
	unsigned nents = sg_count(payload, nbytes);

	dma_unmap_sg(req->engine->dev, payload, nents, dir);
	dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
}

/*
 * Set key for a DES operation in an AEAD cipher. This also performs weak key
 * checking if required.
 */
static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];

	if (unlikely(!des_ekey(tmp, key)) &&
	    (crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->cipher_key, key, len);
	ctx->cipher_key_len = len;

	return 0;
}

/* Set the key for the AES block cipher component of the AEAD transform. */
static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	/*
	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
	 * request for any other size (192 bits) then we need to do a software
	 * fallback.
	 */
	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
		/*
		 * Set the fallback transform to use the same request flags as
		 * the hardware transform.
		 */
		ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
		ctx->sw_cipher->base.crt_flags |=
			tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
		return crypto_aead_setkey(ctx->sw_cipher, key, len);
	}

	memcpy(ctx->cipher_key, key, len);
	ctx->cipher_key_len = len;

	return 0;
}

static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
	struct crypto_authenc_keys keys;
	int err = -EINVAL;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.enckeylen > AES_MAX_KEY_SIZE)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->hash_ctx))
		goto badkey;

	if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
	    SPA_CTRL_CIPH_ALG_AES)
		err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
	else
		err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);

	if (err)
		goto badkey;

	memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
	ctx->hash_key_len = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
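/*
 * crypto_authenc_extractkeys() unpacks the single authenc() key blob (an
 * rtattr carrying the encryption key length, followed by the
 * authentication key and then the encryption key) into keys.authkey and
 * keys.enckey as consumed above.
 */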

static int spacc_aead_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));

	ctx->auth_size = authsize;

	return 0;
}

/*
 * Check if an AEAD request requires a fallback operation. Some requests can't
 * be completed in hardware because the hardware may not support certain key
 * sizes. In these cases we need to complete the request in software.
 */
static int spacc_aead_need_fallback(struct spacc_req *req)
{
	struct aead_request *aead_req;
	struct crypto_tfm *tfm = req->req->tfm;
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	aead_req = container_of(req->req, struct aead_request, base);
	/*
	 * If we have an unsupported key length, then we need to do a
	 * software fallback.
	 */
	if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
	    SPA_CTRL_CIPH_ALG_AES &&
	    ctx->cipher_key_len != AES_KEYSIZE_128 &&
	    ctx->cipher_key_len != AES_KEYSIZE_256)
		return 1;

	return 0;
}

static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
				  bool is_encrypt)
{
	struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
	int err;

	if (ctx->sw_cipher) {
		/*
		 * Change the request to use the software fallback transform,
		 * and once the ciphering has completed, put the old transform
		 * back into the request.
		 */
		aead_request_set_tfm(req, ctx->sw_cipher);
		err = is_encrypt ? crypto_aead_encrypt(req) :
		    crypto_aead_decrypt(req);
		aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
	} else
		err = -EINVAL;

	return err;
}

static void spacc_aead_complete(struct spacc_req *req)
{
	spacc_aead_free_ddts(req);
	req->req->complete(req->req, req->result);
}

static int spacc_aead_submit(struct spacc_req *req)
{
	struct crypto_tfm *tfm = req->req->tfm;
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = ctx->generic.engine;
	u32 ctrl, proc_len, assoc_len;
	struct aead_request *aead_req =
		container_of(req->req, struct aead_request, base);

	req->result = -EINPROGRESS;
	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
		ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
		ctx->hash_ctx, ctx->hash_key_len);

	/* Set the source and destination DDT pointers. */
	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

	assoc_len = aead_req->assoclen;
	proc_len = aead_req->cryptlen + assoc_len;

	/*
	 * If we aren't generating an IV, then we need to include the IV in the
	 * associated data so that it is included in the hash.
	 */
	if (!req->giv) {
		assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
		proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
	} else
		proc_len += req->giv_len;

	/*
	 * If we are decrypting, we need to take the length of the ICV out of
	 * the processing length.
	 */
	if (!req->is_encrypt)
		proc_len -= ctx->auth_size;

	writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
	writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);

	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
		(1 << SPA_CTRL_ICV_APPEND);
	if (req->is_encrypt)
		ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
	else
		ctrl |= (1 << SPA_CTRL_KEY_EXP);

	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}
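/*
 * To summarise the lengths programmed above: proc_len covers the
 * associated data, the IV (explicit or generated) and the payload. On
 * decrypt the trailing ICV is excluded from proc_len; a failed ICV check
 * surfaces as SPA_STATUS_ICV_FAIL when the result is popped.
 */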

static int spacc_req_submit(struct spacc_req *req);

static void spacc_push(struct spacc_engine *engine)
{
	struct spacc_req *req;

	while (!list_empty(&engine->pending) &&
	       engine->in_flight + 1 <= engine->fifo_sz) {

		++engine->in_flight;
		req = list_first_entry(&engine->pending, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->in_progress);

		req->result = spacc_req_submit(req);
	}
}
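/*
 * spacc_push() runs with hw_lock held and drains the pending list into
 * the command FIFO until the engine has fifo_sz requests in flight.
 */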

/*
 * Set up an AEAD request for processing. This will configure the engine, load
 * the context and then start the packet processing.
 *
 * @giv Pointer to destination address for a generated IV. If the
 *	request does not need to generate an IV then this should be set to NULL.
 */
static int spacc_aead_setup(struct aead_request *req, u8 *giv,
			    unsigned alg_type, bool is_encrypt)
{
	struct crypto_alg *alg = req->base.tfm->__crt_alg;
	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
	struct spacc_req *dev_req = aead_request_ctx(req);
	int err = -EINPROGRESS;
	unsigned long flags;
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));

	dev_req->giv = giv;
	dev_req->giv_len = ivsize;
	dev_req->req = &req->base;
	dev_req->is_encrypt = is_encrypt;
	dev_req->result = -EBUSY;
	dev_req->engine = engine;
	dev_req->complete = spacc_aead_complete;

	if (unlikely(spacc_aead_need_fallback(dev_req)))
		return spacc_aead_do_fallback(req, alg_type, is_encrypt);

	spacc_aead_make_ddts(dev_req, dev_req->giv);

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	spacc_aead_free_ddts(dev_req);
out:
	return err;
}

static int spacc_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_aead_setup(req, NULL, alg->type, 1);
}

static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	size_t ivsize = crypto_aead_ivsize(tfm);
	struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
	unsigned len;
	__be64 seq;

	memcpy(req->areq.iv, ctx->salt, ivsize);
	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);

	return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
}
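/*
 * The generated IV layout: req->areq.iv is seeded from the per-transform
 * random salt, while req->giv receives the big-endian request sequence
 * number in its final eight bytes (zero-padded when ivsize > 8).
 */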

static int spacc_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_aead_setup(req, NULL, alg->type, 0);
}

/*
 * Initialise a new AEAD context. This is responsible for allocating the
 * fallback cipher and initialising the context.
 */
static int spacc_aead_cra_init(struct crypto_tfm *tfm)
{
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = spacc_alg->engine;

	ctx->generic.flags = spacc_alg->type;
	ctx->generic.engine = engine;
	ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
					   CRYPTO_ALG_ASYNC |
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->sw_cipher)) {
		dev_warn(engine->dev, "failed to allocate fallback for %s\n",
			 alg->cra_name);
		ctx->sw_cipher = NULL;
	}
	ctx->generic.key_offs = spacc_alg->key_offs;
	ctx->generic.iv_offs = spacc_alg->iv_offs;

	get_random_bytes(ctx->salt, sizeof(ctx->salt));

	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
				sizeof(struct spacc_req));

	return 0;
}

/*
 * Destructor for an AEAD context. This is called when the transform is freed
 * and must free the fallback cipher.
 */
static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
{
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sw_cipher)
		crypto_free_aead(ctx->sw_cipher);
	ctx->sw_cipher = NULL;
}

/*
 * Set the DES key for a block cipher transform. This also performs weak key
 * checking if the transform has requested it.
 */
static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			    unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];

	if (len > DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (unlikely(!des_ekey(tmp, key)) &&
	    (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

	return 0;
}

/*
 * Set the key for an AES block cipher. Some key lengths are not supported in
 * hardware so this must also check whether a fallback is needed.
 */
static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			    unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = 0;

	if (len > AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/*
	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
	 * request for any other size (192 bits) then we need to do a software
	 * fallback.
	 */
	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
	    ctx->sw_cipher) {
		/*
		 * Set the fallback transform to use the same request flags as
		 * the hardware transform.
		 */
		ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
		ctx->sw_cipher->base.crt_flags |=
			cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;

		err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
		if (err)
			goto sw_setkey_failed;
	} else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
		   !ctx->sw_cipher)
		err = -EINVAL;

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

sw_setkey_failed:
	if (err && ctx->sw_cipher) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |=
			ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
	}

	return err;
}

static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
				  const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = 0;

	if (len > AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		err = -EINVAL;
		goto out;
	}

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

out:
	return err;
}

static int spacc_ablk_need_fallback(struct spacc_req *req)
{
	struct spacc_ablk_ctx *ctx;
	struct crypto_tfm *tfm = req->req->tfm;
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);

	ctx = crypto_tfm_ctx(tfm);

	return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
			SPA_CTRL_CIPH_ALG_AES &&
		ctx->key_len != AES_KEYSIZE_128 &&
		ctx->key_len != AES_KEYSIZE_256;
}

static void spacc_ablk_complete(struct spacc_req *req)
{
	struct ablkcipher_request *ablk_req =
		container_of(req->req, struct ablkcipher_request, base);

	if (ablk_req->src != ablk_req->dst) {
		spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
			       ablk_req->nbytes, DMA_TO_DEVICE);
		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
			       ablk_req->nbytes, DMA_FROM_DEVICE);
	} else
		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
			       ablk_req->nbytes, DMA_BIDIRECTIONAL);

	req->req->complete(req->req, req->result);
}

static int spacc_ablk_submit(struct spacc_req *req)
{
	struct crypto_tfm *tfm = req->req->tfm;
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = ctx->generic.engine;
	u32 ctrl;

	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
		ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
		NULL, 0);

	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

	writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
	writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);

	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
		(req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
		 (1 << SPA_CTRL_KEY_EXP));

	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}

static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
				  unsigned alg_type, bool is_encrypt)
{
	struct crypto_tfm *old_tfm =
	    crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
	int err;

	if (!ctx->sw_cipher)
		return -EINVAL;

	/*
	 * Change the request to use the software fallback transform, and once
	 * the ciphering has completed, put the old transform back into the
	 * request.
	 */
	ablkcipher_request_set_tfm(req, ctx->sw_cipher);
	err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
	    crypto_ablkcipher_decrypt(req);
	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));

	return err;
}

static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
			    bool is_encrypt)
{
	struct crypto_alg *alg = req->base.tfm->__crt_alg;
	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
	struct spacc_req *dev_req = ablkcipher_request_ctx(req);
	unsigned long flags;
	int err = -ENOMEM;

	dev_req->req = &req->base;
	dev_req->is_encrypt = is_encrypt;
	dev_req->engine = engine;
	dev_req->complete = spacc_ablk_complete;
	dev_req->result = -EINPROGRESS;

	if (unlikely(spacc_ablk_need_fallback(dev_req)))
		return spacc_ablk_do_fallback(req, alg_type, is_encrypt);

	/*
	 * Create the DDTs for the engine. If we share the same source and
	 * destination then we can optimize by reusing the DDTs.
	 */
	if (req->src != req->dst) {
		dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
			req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
		if (!dev_req->src_ddt)
			goto out;

		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out_free_src;
	} else {
		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out;

		dev_req->src_ddt = NULL;
		dev_req->src_addr = dev_req->dst_addr;
	}

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
	/*
	 * Check if the engine will accept the operation now. If it won't then
	 * we either stick it on the end of a pending list if we can backlog,
	 * or bail out with an error if not.
	 */
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
		       req->nbytes, req->src == req->dst ?
		       DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
	if (req->src != req->dst)
		spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
			       req->src, req->nbytes, DMA_TO_DEVICE);
out:
	return err;
}

static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = spacc_alg->engine;

	ctx->generic.flags = spacc_alg->type;
	ctx->generic.engine = engine;
	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->sw_cipher)) {
			dev_warn(engine->dev, "failed to allocate fallback for %s\n",
				 alg->cra_name);
			ctx->sw_cipher = NULL;
		}
	}
	ctx->generic.key_offs = spacc_alg->key_offs;
	ctx->generic.iv_offs = spacc_alg->iv_offs;

	tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);

	return 0;
}

static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sw_cipher)
		crypto_free_ablkcipher(ctx->sw_cipher);
	ctx->sw_cipher = NULL;
}

static int spacc_ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_ablk_setup(req, alg->type, 1);
}

static int spacc_ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_ablk_setup(req, alg->type, 0);
}

static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
{
	return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
		SPA_FIFO_STAT_EMPTY;
}

static void spacc_process_done(struct spacc_engine *engine)
{
	struct spacc_req *req;
	unsigned long flags;

	spin_lock_irqsave(&engine->hw_lock, flags);

	while (!spacc_fifo_stat_empty(engine)) {
		req = list_first_entry(&engine->in_progress, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->completed);
		--engine->in_flight;

		/* POP the status register. */
		writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
		req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
		     SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;

		/*
		 * Convert the SPAcc error status into the standard POSIX error
		 * codes.
		 */
		if (unlikely(req->result)) {
			switch (req->result) {
			case SPA_STATUS_ICV_FAIL:
				req->result = -EBADMSG;
				break;

			case SPA_STATUS_MEMORY_ERROR:
				dev_warn(engine->dev,
					 "memory error triggered\n");
				req->result = -EFAULT;
				break;

			case SPA_STATUS_BLOCK_ERROR:
				dev_warn(engine->dev,
					 "block error triggered\n");
				req->result = -EIO;
				break;
			}
		}
	}

	tasklet_schedule(&engine->complete);

	spin_unlock_irqrestore(&engine->hw_lock, flags);
}
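/*
 * Completion handling assumes the engine retires requests in submission
 * order: each pop of the status FIFO is matched with the current head of
 * the in_progress list.
 */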

static irqreturn_t spacc_spacc_irq(int irq, void *dev)
{
	struct spacc_engine *engine = (struct spacc_engine *)dev;
	u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);

	writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
	spacc_process_done(engine);

	return IRQ_HANDLED;
}

static void spacc_packet_timeout(unsigned long data)
{
	struct spacc_engine *engine = (struct spacc_engine *)data;

	spacc_process_done(engine);
}

static int spacc_req_submit(struct spacc_req *req)
{
	struct crypto_alg *alg = req->req->tfm->__crt_alg;

	if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
		return spacc_aead_submit(req);
	else
		return spacc_ablk_submit(req);
}

static void spacc_spacc_complete(unsigned long data)
{
	struct spacc_engine *engine = (struct spacc_engine *)data;
	struct spacc_req *req, *tmp;
	unsigned long flags;
	LIST_HEAD(completed);

	spin_lock_irqsave(&engine->hw_lock, flags);

	list_splice_init(&engine->completed, &completed);
	spacc_push(engine);
	if (engine->in_flight)
		mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	spin_unlock_irqrestore(&engine->hw_lock, flags);

	list_for_each_entry_safe(req, tmp, &completed, list) {
		list_del(&req->list);
		req->complete(req);
	}
}

#ifdef CONFIG_PM
static int spacc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spacc_engine *engine = platform_get_drvdata(pdev);

	/*
	 * We only support standby mode. All we have to do is gate the clock to
	 * the spacc. The hardware will preserve state until we turn it back
	 * on again.
	 */
	clk_disable(engine->clk);

	return 0;
}

static int spacc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spacc_engine *engine = platform_get_drvdata(pdev);

	return clk_enable(engine->clk);
}

static const struct dev_pm_ops spacc_pm_ops = {
	.suspend	= spacc_suspend,
	.resume		= spacc_resume,
};
#endif /* CONFIG_PM */

static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
{
	return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
}

static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct spacc_engine *engine = spacc_dev_to_engine(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
}

static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t len)
{
	struct spacc_engine *engine = spacc_dev_to_engine(dev);
	unsigned long thresh;

	if (kstrtoul(buf, 0, &thresh))
		return -EINVAL;

	thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);

	engine->stat_irq_thresh = thresh;
	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);

	return len;
}
static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
		   spacc_stat_irq_thresh_store);
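/*
 * The threshold is writable at runtime through sysfs, e.g. (the exact
 * path depends on the platform device name):
 *
 *	echo 4 > /sys/devices/platform/picoxcell-spacc/stat_irq_thresh
 *
 * which raises the STAT_CNT interrupt threshold to four completed PDUs.
 */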
1299 | ||
1300 | static struct spacc_alg ipsec_engine_algs[] = { | |
1301 | { | |
1302 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC, | |
1303 | .key_offs = 0, | |
1304 | .iv_offs = AES_MAX_KEY_SIZE, | |
1305 | .alg = { | |
1306 | .cra_name = "cbc(aes)", | |
1307 | .cra_driver_name = "cbc-aes-picoxcell", | |
1308 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
1309 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
d912bb76 | 1310 | CRYPTO_ALG_KERN_DRIVER_ONLY | |
ce921368 JI |
1311 | CRYPTO_ALG_ASYNC | |
1312 | CRYPTO_ALG_NEED_FALLBACK, | |
1313 | .cra_blocksize = AES_BLOCK_SIZE, | |
1314 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | |
1315 | .cra_type = &crypto_ablkcipher_type, | |
1316 | .cra_module = THIS_MODULE, | |
1317 | .cra_ablkcipher = { | |
1318 | .setkey = spacc_aes_setkey, | |
1319 | .encrypt = spacc_ablk_encrypt, | |
1320 | .decrypt = spacc_ablk_decrypt, | |
1321 | .min_keysize = AES_MIN_KEY_SIZE, | |
1322 | .max_keysize = AES_MAX_KEY_SIZE, | |
1323 | .ivsize = AES_BLOCK_SIZE, | |
1324 | }, | |
1325 | .cra_init = spacc_ablk_cra_init, | |
1326 | .cra_exit = spacc_ablk_cra_exit, | |
1327 | }, | |
1328 | }, | |
1329 | { | |
1330 | .key_offs = 0, | |
1331 | .iv_offs = AES_MAX_KEY_SIZE, | |
1332 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB, | |
1333 | .alg = { | |
1334 | .cra_name = "ecb(aes)", | |
1335 | .cra_driver_name = "ecb-aes-picoxcell", | |
1336 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
1337 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
d912bb76 | 1338 | CRYPTO_ALG_KERN_DRIVER_ONLY | |
ce921368 JI |
1339 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, |
1340 | .cra_blocksize = AES_BLOCK_SIZE, | |
1341 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | |
1342 | .cra_type = &crypto_ablkcipher_type, | |
1343 | .cra_module = THIS_MODULE, | |
1344 | .cra_ablkcipher = { | |
1345 | .setkey = spacc_aes_setkey, | |
1346 | .encrypt = spacc_ablk_encrypt, | |
1347 | .decrypt = spacc_ablk_decrypt, | |
1348 | .min_keysize = AES_MIN_KEY_SIZE, | |
1349 | .max_keysize = AES_MAX_KEY_SIZE, | |
1350 | }, | |
1351 | .cra_init = spacc_ablk_cra_init, | |
1352 | .cra_exit = spacc_ablk_cra_exit, | |
1353 | }, | |
1354 | }, | |
1355 | { | |
1356 | .key_offs = DES_BLOCK_SIZE, | |
1357 | .iv_offs = 0, | |
1358 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC, | |
1359 | .alg = { | |
1360 | .cra_name = "cbc(des)", | |
1361 | .cra_driver_name = "cbc-des-picoxcell", | |
1362 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
d912bb76 NM |
1363 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
1364 | CRYPTO_ALG_ASYNC | | |
1365 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
ce921368 JI |
1366 | .cra_blocksize = DES_BLOCK_SIZE, |
1367 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | |
1368 | .cra_type = &crypto_ablkcipher_type, | |
1369 | .cra_module = THIS_MODULE, | |
1370 | .cra_ablkcipher = { | |
1371 | .setkey = spacc_des_setkey, | |
1372 | .encrypt = spacc_ablk_encrypt, | |
1373 | .decrypt = spacc_ablk_decrypt, | |
1374 | .min_keysize = DES_KEY_SIZE, | |
1375 | .max_keysize = DES_KEY_SIZE, | |
1376 | .ivsize = DES_BLOCK_SIZE, | |
1377 | }, | |
1378 | .cra_init = spacc_ablk_cra_init, | |
1379 | .cra_exit = spacc_ablk_cra_exit, | |
1380 | }, | |
1381 | }, | |
1382 | { | |
1383 | .key_offs = DES_BLOCK_SIZE, | |
1384 | .iv_offs = 0, | |
1385 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB, | |
1386 | .alg = { | |
1387 | .cra_name = "ecb(des)", | |
1388 | .cra_driver_name = "ecb-des-picoxcell", | |
1389 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
d912bb76 NM |
1390 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
1391 | CRYPTO_ALG_ASYNC | | |
1392 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
ce921368 JI |
1393 | .cra_blocksize = DES_BLOCK_SIZE, |
1394 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | |
1395 | .cra_type = &crypto_ablkcipher_type, | |
1396 | .cra_module = THIS_MODULE, | |
1397 | .cra_ablkcipher = { | |
1398 | .setkey = spacc_des_setkey, | |
1399 | .encrypt = spacc_ablk_encrypt, | |
1400 | .decrypt = spacc_ablk_decrypt, | |
1401 | .min_keysize = DES_KEY_SIZE, | |
1402 | .max_keysize = DES_KEY_SIZE, | |
1403 | }, | |
1404 | .cra_init = spacc_ablk_cra_init, | |
1405 | .cra_exit = spacc_ablk_cra_exit, | |
1406 | }, | |
1407 | }, | |
1408 | { | |
1409 | .key_offs = DES_BLOCK_SIZE, | |
1410 | .iv_offs = 0, | |
1411 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC, | |
1412 | .alg = { | |
1413 | .cra_name = "cbc(des3_ede)", | |
1414 | .cra_driver_name = "cbc-des3-ede-picoxcell", | |
1415 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
d912bb76 NM |
1416 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
1417 | CRYPTO_ALG_ASYNC | | |
1418 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
ce921368 JI |
1419 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1420 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | |
1421 | .cra_type = &crypto_ablkcipher_type, | |
1422 | .cra_module = THIS_MODULE, | |
1423 | .cra_ablkcipher = { | |
1424 | .setkey = spacc_des_setkey, | |
1425 | .encrypt = spacc_ablk_encrypt, | |
1426 | .decrypt = spacc_ablk_decrypt, | |
1427 | .min_keysize = DES3_EDE_KEY_SIZE, | |
1428 | .max_keysize = DES3_EDE_KEY_SIZE, | |
1429 | .ivsize = DES3_EDE_BLOCK_SIZE, | |
1430 | }, | |
1431 | .cra_init = spacc_ablk_cra_init, | |
1432 | .cra_exit = spacc_ablk_cra_exit, | |
1433 | }, | |
1434 | }, | |
1435 | { | |
1436 | .key_offs = DES_BLOCK_SIZE, | |
1437 | .iv_offs = 0, | |
1438 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB, | |
1439 | .alg = { | |
1440 | .cra_name = "ecb(des3_ede)", | |
1441 | .cra_driver_name = "ecb-des3-ede-picoxcell", | |
1442 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
d912bb76 NM |
1443 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
1444 | CRYPTO_ALG_ASYNC | | |
1445 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
ce921368 JI |
1446 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1447 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | |
1448 | .cra_type = &crypto_ablkcipher_type, | |
1449 | .cra_module = THIS_MODULE, | |
1450 | .cra_ablkcipher = { | |
1451 | .setkey = spacc_des_setkey, | |
1452 | .encrypt = spacc_ablk_encrypt, | |
1453 | .decrypt = spacc_ablk_decrypt, | |
1454 | .min_keysize = DES3_EDE_KEY_SIZE, | |
1455 | .max_keysize = DES3_EDE_KEY_SIZE, | |
1456 | }, | |
1457 | .cra_init = spacc_ablk_cra_init, | |
1458 | .cra_exit = spacc_ablk_cra_exit, | |
1459 | }, | |
1460 | }, | |
1461 | { | |
1462 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | |
1463 | SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, | |
1464 | .key_offs = 0, | |
1465 | .iv_offs = AES_MAX_KEY_SIZE, | |
1466 | .alg = { | |
1467 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | |
1468 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell", | |
1469 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
d912bb76 NM |
1470 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | |
1471 | CRYPTO_ALG_ASYNC | | |
1472 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
ce921368 JI |
1473 | .cra_blocksize = AES_BLOCK_SIZE, |
1474 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | |
1475 | .cra_type = &crypto_aead_type, | |
1476 | .cra_module = THIS_MODULE, | |
1477 | .cra_aead = { | |
1478 | .setkey = spacc_aead_setkey, | |
1479 | .setauthsize = spacc_aead_setauthsize, | |
1480 | .encrypt = spacc_aead_encrypt, | |
1481 | .decrypt = spacc_aead_decrypt, | |
1482 | .givencrypt = spacc_aead_givencrypt, | |
1483 | .ivsize = AES_BLOCK_SIZE, | |
1484 | .maxauthsize = SHA1_DIGEST_SIZE, | |
1485 | }, | |
1486 | .cra_init = spacc_aead_cra_init, | |
1487 | .cra_exit = spacc_aead_cra_exit, | |
1488 | }, | |
1489 | }, | |
1490 | { | |
1491 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | |
1492 | SPA_CTRL_HASH_ALG_SHA256 | | |
1493 | SPA_CTRL_HASH_MODE_HMAC, | |
1494 | .key_offs = 0, | |
1495 | .iv_offs = AES_MAX_KEY_SIZE, | |
1496 | .alg = { | |
1497 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | |
1498 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell", | |
1499 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
d912bb76 NM |
1500 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | |
1501 | CRYPTO_ALG_ASYNC | | |
1502 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
ce921368 JI |
1503 | .cra_blocksize = AES_BLOCK_SIZE, |
1504 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | |
1505 | .cra_type = &crypto_aead_type, | |
1506 | .cra_module = THIS_MODULE, | |
1507 | .cra_aead = { | |
1508 | .setkey = spacc_aead_setkey, | |
1509 | .setauthsize = spacc_aead_setauthsize, | |
1510 | .encrypt = spacc_aead_encrypt, | |
1511 | .decrypt = spacc_aead_decrypt, | |
1512 | .givencrypt = spacc_aead_givencrypt, | |
1513 | .ivsize = AES_BLOCK_SIZE, | |
1514 | .maxauthsize = SHA256_DIGEST_SIZE, | |
1515 | }, | |
1516 | .cra_init = spacc_aead_cra_init, | |
1517 | .cra_exit = spacc_aead_cra_exit, | |
1518 | }, | |
1519 | }, | |
1520 | { | |
1521 | .key_offs = 0, | |
1522 | .iv_offs = AES_MAX_KEY_SIZE, | |
1523 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | |
1524 | SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, | |
1525 | .alg = { | |
1526 | .cra_name = "authenc(hmac(md5),cbc(aes))", | |
1527 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell", | |
1528 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
d912bb76 NM |
1529 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | |
1530 | CRYPTO_ALG_ASYNC | | |
1531 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
ce921368 JI |
1532 | .cra_blocksize = AES_BLOCK_SIZE, |
1533 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | |
1534 | .cra_type = &crypto_aead_type, | |
1535 | .cra_module = THIS_MODULE, | |
1536 | .cra_aead = { | |
1537 | .setkey = spacc_aead_setkey, | |
1538 | .setauthsize = spacc_aead_setauthsize, | |
1539 | .encrypt = spacc_aead_encrypt, | |
1540 | .decrypt = spacc_aead_decrypt, | |
1541 | .givencrypt = spacc_aead_givencrypt, | |
1542 | .ivsize = AES_BLOCK_SIZE, | |
1543 | .maxauthsize = MD5_DIGEST_SIZE, | |
1544 | }, | |
1545 | .cra_init = spacc_aead_cra_init, | |
1546 | .cra_exit = spacc_aead_cra_exit, | |
1547 | }, | |
1548 | }, | |
1549 | { | |
1550 | .key_offs = DES_BLOCK_SIZE, | |
1551 | .iv_offs = 0, | |
1552 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | | |
1553 | SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, | |
1554 | .alg = { | |
1555 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", | |
1556 | .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell", | |
1557 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
d912bb76 NM |
1558 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | |
1559 | CRYPTO_ALG_ASYNC | | |
1560 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
ce921368 JI |
1561 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1562 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | |
1563 | .cra_type = &crypto_aead_type, | |
1564 | .cra_module = THIS_MODULE, | |
1565 | .cra_aead = { | |
1566 | .setkey = spacc_aead_setkey, | |
1567 | .setauthsize = spacc_aead_setauthsize, | |
1568 | .encrypt = spacc_aead_encrypt, | |
1569 | .decrypt = spacc_aead_decrypt, | |
1570 | .givencrypt = spacc_aead_givencrypt, | |
1571 | .ivsize = DES3_EDE_BLOCK_SIZE, | |
1572 | .maxauthsize = SHA1_DIGEST_SIZE, | |
1573 | }, | |
1574 | .cra_init = spacc_aead_cra_init, | |
1575 | .cra_exit = spacc_aead_cra_exit, | |
1576 | }, | |
1577 | }, | |
1578 | { | |
1579 | .key_offs = DES_BLOCK_SIZE, | |
1580 | .iv_offs = 0, | |
1581 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | | |
1582 | SPA_CTRL_HASH_ALG_SHA256 | | |
1583 | SPA_CTRL_HASH_MODE_HMAC, | |
1584 | .alg = { | |
1585 | .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", | |
1586 | .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell", | |
1587 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
d912bb76 NM |
1588 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | |
1589 | CRYPTO_ALG_ASYNC | | |
1590 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
ce921368 JI |
1591 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1592 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | |
1593 | .cra_type = &crypto_aead_type, | |
1594 | .cra_module = THIS_MODULE, | |
1595 | .cra_aead = { | |
1596 | .setkey = spacc_aead_setkey, | |
1597 | .setauthsize = spacc_aead_setauthsize, | |
1598 | .encrypt = spacc_aead_encrypt, | |
1599 | .decrypt = spacc_aead_decrypt, | |
1600 | .givencrypt = spacc_aead_givencrypt, | |
1601 | .ivsize = DES3_EDE_BLOCK_SIZE, | |
1602 | .maxauthsize = SHA256_DIGEST_SIZE, | |
1603 | }, | |
1604 | .cra_init = spacc_aead_cra_init, | |
1605 | .cra_exit = spacc_aead_cra_exit, | |
1606 | }, | |
1607 | }, | |
1608 | { | |
1609 | .key_offs = DES_BLOCK_SIZE, | |
1610 | .iv_offs = 0, | |
1611 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | | |
1612 | SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, | |
1613 | .alg = { | |
1614 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", | |
1615 | .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell", | |
1616 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
d912bb76 NM |
1617 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | |
1618 | CRYPTO_ALG_ASYNC | | |
1619 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
ce921368 JI |
1620 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1621 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | |
1622 | .cra_type = &crypto_aead_type, | |
1623 | .cra_module = THIS_MODULE, | |
1624 | .cra_aead = { | |
1625 | .setkey = spacc_aead_setkey, | |
1626 | .setauthsize = spacc_aead_setauthsize, | |
1627 | .encrypt = spacc_aead_encrypt, | |
1628 | .decrypt = spacc_aead_decrypt, | |
1629 | .givencrypt = spacc_aead_givencrypt, | |
1630 | .ivsize = DES3_EDE_BLOCK_SIZE, | |
1631 | .maxauthsize = MD5_DIGEST_SIZE, | |
1632 | }, | |
1633 | .cra_init = spacc_aead_cra_init, | |
1634 | .cra_exit = spacc_aead_cra_exit, | |
1635 | }, | |
1636 | }, | |
1637 | }; | |
1638 | ||
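/* Algorithms exposed by the layer-2 (f8(kasumi)) variant of the engine. */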
1639 | static struct spacc_alg l2_engine_algs[] = { | |
1640 | { | |
1641 | .key_offs = 0, | |
1642 | .iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN, | |
1643 | .ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI | | |
1644 | SPA_CTRL_CIPH_MODE_F8, | |
1645 | .alg = { | |
1646 | .cra_name = "f8(kasumi)", | |
1647 | .cra_driver_name = "f8-kasumi-picoxcell", | |
1648 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | |
d912bb76 NM |
1649 | .cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | |
1650 | CRYPTO_ALG_ASYNC | | |
1651 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
ce921368 JI |
1652 | .cra_blocksize = 8, |
1653 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | |
1654 | .cra_type = &crypto_ablkcipher_type, | |
1655 | .cra_module = THIS_MODULE, | |
1656 | .cra_ablkcipher = { | |
1657 | .setkey = spacc_kasumi_f8_setkey, | |
1658 | .encrypt = spacc_ablk_encrypt, | |
1659 | .decrypt = spacc_ablk_decrypt, | |
1660 | .min_keysize = SPACC_CRYPTO_KASUMI_F8_KEY_LEN, | |
1661 | .max_keysize = SPACC_CRYPTO_KASUMI_F8_KEY_LEN, | |
1662 | .ivsize = 8, | |
1663 | }, | |
1664 | .cra_init = spacc_ablk_cra_init, | |
1665 | .cra_exit = spacc_ablk_cra_exit, | |
1666 | }, | |
1667 | }, | |
1668 | }; | |
1669 | ||
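/* Device tree match table: both engine variants probe through this driver. */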
30343ef1 JI |
1670 | #ifdef CONFIG_OF |
1671 | static const struct of_device_id spacc_of_id_table[] = { | |
1672 | { .compatible = "picochip,spacc-ipsec" }, | |
1673 | { .compatible = "picochip,spacc-l2" }, | |
1674 | {} | |
1675 | }; | |
30343ef1 JI |
1676 | #endif /* CONFIG_OF */ |
1677 | ||
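/*
 * Match the engine variant against either the legacy platform device ID
 * table or the device tree compatible string.
 */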
1678 | static bool spacc_is_compatible(struct platform_device *pdev, | |
1679 | const char *spacc_type) | |
1680 | { | |
1681 | const struct platform_device_id *platid = platform_get_device_id(pdev); | |
1682 | ||
1683 | if (platid && !strcmp(platid->name, spacc_type)) | |
1684 | return true; | |
1685 | ||
1686 | #ifdef CONFIG_OF | |
1687 | if (of_device_is_compatible(pdev->dev.of_node, spacc_type)) | |
1688 | return true; | |
1689 | #endif /* CONFIG_OF */ | |
1690 | ||
1691 | return false; | |
1692 | } | |
1693 | ||
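/*
 * Probe a single SPAcc instance: pick the variant's parameters, map the
 * registers, request the IRQ and "ref" clock, then register the algorithms
 * the variant supports.
 */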
49cfe4db | 1694 | static int spacc_probe(struct platform_device *pdev) |
ce921368 JI |
1695 | { |
1696 | int i, err, ret = -EINVAL; | |
1697 | struct resource *mem, *irq; | |
1698 | struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), | |
1699 | GFP_KERNEL); | |
1700 | if (!engine) | |
1701 | return -ENOMEM; | |
1702 | ||
30343ef1 | 1703 | if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) { |
c3f4200f JI |
1704 | engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS; |
1705 | engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ; | |
1706 | engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ; | |
1707 | engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ; | |
1708 | engine->algs = ipsec_engine_algs; | |
1709 | engine->num_algs = ARRAY_SIZE(ipsec_engine_algs); | |
30343ef1 | 1710 | } else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) { |
c3f4200f JI |
1711 | engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS; |
1712 | engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ; | |
1713 | engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ; | |
1714 | engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ; | |
1715 | engine->algs = l2_engine_algs; | |
1716 | engine->num_algs = ARRAY_SIZE(l2_engine_algs); | |
1717 | } else { | |
1718 | return -EINVAL; | |
1719 | } | |
1720 | ||
1721 | engine->name = dev_name(&pdev->dev); | |
ce921368 JI |
1722 | |
1723 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
32af1e18 JH |
1724 | engine->regs = devm_ioremap_resource(&pdev->dev, mem); |
1725 | if (IS_ERR(engine->regs)) | |
1726 | return PTR_ERR(engine->regs); | |
1727 | ||
ce921368 | 1728 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
32af1e18 | 1729 | if (!irq) { |
ce921368 JI |
1730 | dev_err(&pdev->dev, "no IRQ resource for engine\n"); |
1731 | return -ENXIO; | |
1732 | } | |
1733 | ||
ce921368 JI |
1734 | if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0, |
1735 | engine->name, engine)) { | |
1736 | dev_err(engine->dev, "failed to request IRQ\n"); | |
1737 | return -EBUSY; | |
1738 | } | |
1739 | ||
1740 | engine->dev = &pdev->dev; | |
1741 | engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET; | |
1742 | engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET; | |
1743 | ||
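	/*
	 * Pool of DDT chains, one per request.  The SZ_64K boundary stops a
	 * chain from straddling a 64KB boundary in DMA memory.
	 */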
1744 | engine->req_pool = dmam_pool_create(engine->name, engine->dev, | |
1745 | MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K); | |
1746 | if (!engine->req_pool) | |
1747 | return -ENOMEM; | |
1748 | ||
1749 | spin_lock_init(&engine->hw_lock); | |
1750 | ||
4efae8c9 | 1751 | engine->clk = clk_get(&pdev->dev, "ref"); |
ce921368 JI |
1752 | if (IS_ERR(engine->clk)) { |
1753 | dev_info(&pdev->dev, "clk unavailable\n"); | |
1755 | return PTR_ERR(engine->clk); | |
1756 | } | |
1757 | ||
1758 | if (clk_enable(engine->clk)) { | |
1759 | dev_info(&pdev->dev, "unable to enable clk\n"); | |
1760 | clk_put(engine->clk); | |
1761 | return -EIO; | |
1762 | } | |
1763 | ||
1764 | err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); | |
1765 | if (err) { | |
1766 | clk_disable(engine->clk); | |
1767 | clk_put(engine->clk); | |
1768 | return err; | |
1769 | } | |
1770 | ||
1772 | /* | |
1773 | * Use an IRQ threshold of 50% as a default. This seems to be a | |
1774 | * reasonable trade off of latency against throughput but can be | |
1775 | * changed at runtime. | |
1776 | */ | |
1777 | engine->stat_irq_thresh = (engine->fifo_sz / 2); | |
1778 | ||
1779 | /* | |
1780 | * Configure the interrupts. We only use the STAT_CNT interrupt as we | |
1781 | * only submit a new packet for processing when we complete another in | |
1782 | * the queue. This minimizes time spent in the interrupt handler. | |
1783 | */ | |
1784 | writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET, | |
1785 | engine->regs + SPA_IRQ_CTRL_REG_OFFSET); | |
1786 | writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN, | |
1787 | engine->regs + SPA_IRQ_EN_REG_OFFSET); | |
1788 | ||
1789 | setup_timer(&engine->packet_timeout, spacc_packet_timeout, | |
1790 | (unsigned long)engine); | |
1791 | ||
1792 | INIT_LIST_HEAD(&engine->pending); | |
1793 | INIT_LIST_HEAD(&engine->completed); | |
1794 | INIT_LIST_HEAD(&engine->in_progress); | |
1795 | engine->in_flight = 0; | |
1796 | tasklet_init(&engine->complete, spacc_spacc_complete, | |
1797 | (unsigned long)engine); | |
1798 | ||
1799 | platform_set_drvdata(pdev, engine); | |
1800 | ||
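	/*
	 * Register every algorithm this variant supports; the probe succeeds
	 * as long as at least one registration succeeds.
	 */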
1801 | INIT_LIST_HEAD(&engine->registered_algs); | |
1802 | for (i = 0; i < engine->num_algs; ++i) { | |
1803 | engine->algs[i].engine = engine; | |
1804 | err = crypto_register_alg(&engine->algs[i].alg); | |
1805 | if (!err) { | |
1806 | list_add_tail(&engine->algs[i].entry, | |
1807 | &engine->registered_algs); | |
1808 | ret = 0; | |
1809 | } | |
1810 | if (err) | |
1811 | dev_err(engine->dev, "failed to register alg \"%s\"\n", | |
1812 | engine->algs[i].alg.cra_name); | |
1813 | else | |
1814 | dev_dbg(engine->dev, "registered alg \"%s\"\n", | |
1815 | engine->algs[i].alg.cra_name); | |
1816 | } | |
1817 | ||
1818 | return ret; | |
1819 | } | |
1820 | ||
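/*
 * Tear down in reverse order of probe: stop the packet timer, remove the
 * sysfs attribute, unregister the algorithms and release the clock.
 */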
49cfe4db | 1821 | static int spacc_remove(struct platform_device *pdev) |
ce921368 JI |
1822 | { |
1823 | struct spacc_alg *alg, *next; | |
1824 | struct spacc_engine *engine = platform_get_drvdata(pdev); | |
1825 | ||
1826 | del_timer_sync(&engine->packet_timeout); | |
1827 | device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); | |
1828 | ||
1829 | list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) { | |
1830 | list_del(&alg->entry); | |
1831 | crypto_unregister_alg(&alg->alg); | |
1832 | } | |
1833 | ||
1834 | clk_disable(engine->clk); | |
1835 | clk_put(engine->clk); | |
1836 | ||
1837 | return 0; | |
1838 | } | |
1839 | ||
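/* Legacy (non-DT) platform device IDs; DT probing matches spacc_of_id_table. */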
c3f4200f JI |
1840 | static const struct platform_device_id spacc_id_table[] = { |
1841 | { "picochip,spacc-ipsec", }, | |
1842 | { "picochip,spacc-l2", }, | |
14198dd6 | 1843 | { } |
ce921368 JI |
1844 | }; |
1845 | ||
c3f4200f JI |
1846 | static struct platform_driver spacc_driver = { |
1847 | .probe = spacc_probe, | |
49cfe4db | 1848 | .remove = spacc_remove, |
ce921368 | 1849 | .driver = { |
c3f4200f | 1850 | .name = "picochip,spacc", |
ce921368 JI |
1851 | #ifdef CONFIG_PM |
1852 | .pm = &spacc_pm_ops, | |
1853 | #endif /* CONFIG_PM */ | |
5cec26e9 | 1854 | .of_match_table = of_match_ptr(spacc_of_id_table), |
ce921368 | 1855 | }, |
c3f4200f | 1856 | .id_table = spacc_id_table, |
ce921368 JI |
1857 | }; |
1858 | ||
741e8c2d | 1859 | module_platform_driver(spacc_driver); |
ce921368 JI |
1860 | |
1861 | MODULE_LICENSE("GPL"); | |
1862 | MODULE_AUTHOR("Jamie Iles"); |