1 /*
2 *
3 * sep_crypto.c - Crypto interface structures
4 *
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2010 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * CONTACTS:
22 *
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 * 2011.02.22 Enable Kernel Crypto
31 *
32 */
33
34 /* #define DEBUG */
35 #include <linux/module.h>
36 #include <linux/miscdevice.h>
37 #include <linux/fs.h>
38 #include <linux/cdev.h>
39 #include <linux/kdev_t.h>
40 #include <linux/mutex.h>
41 #include <linux/sched.h>
42 #include <linux/mm.h>
43 #include <linux/poll.h>
44 #include <linux/wait.h>
45 #include <linux/pci.h>
46 #include <linux/pm_runtime.h>
47 #include <linux/err.h>
48 #include <linux/device.h>
49 #include <linux/errno.h>
50 #include <linux/interrupt.h>
51 #include <linux/kernel.h>
52 #include <linux/clk.h>
53 #include <linux/irq.h>
54 #include <linux/io.h>
55 #include <linux/platform_device.h>
56 #include <linux/list.h>
57 #include <linux/dma-mapping.h>
58 #include <linux/delay.h>
59 #include <linux/jiffies.h>
60 #include <linux/workqueue.h>
61 #include <linux/crypto.h>
62 #include <crypto/internal/hash.h>
63 #include <crypto/scatterwalk.h>
64 #include <crypto/sha.h>
65 #include <crypto/md5.h>
66 #include <crypto/aes.h>
67 #include <crypto/des.h>
68 #include <crypto/hash.h>
69 #include "sep_driver_hw_defs.h"
70 #include "sep_driver_config.h"
71 #include "sep_driver_api.h"
72 #include "sep_dev.h"
73 #include "sep_crypto.h"
74
75 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
76
77 /* Globals for queuing */
78 static spinlock_t queue_lock;
79 static struct crypto_queue sep_queue;
80
81 /* Forward declaration of the dequeuer */
82 static void sep_dequeuer(void *data);
83
84 /* TESTING */
85 /**
86 * sep_do_callback
87 * @work: pointer to work_struct
88 * This is what is called by the queue; it is generic so that it
89 * can be used by any type of operation as each different callback
90 * function can use the data parameter in its own way
91 */
92 static void sep_do_callback(struct work_struct *work)
93 {
94 struct sep_work_struct *sep_work = container_of(work,
95 struct sep_work_struct, work);
96
97 if (sep_work != NULL) {
98 (sep_work->callback)(sep_work->data);
99 kfree(sep_work);
100 } else {
101 pr_debug("sep crypto: do callback - NULL container\n");
102 }
103 }
104
105 /**
106 * sep_submit_work
107 * @work_queue: pointer to struct_workqueue
108 * @funct: pointer to function to execute
109 * @data: pointer to data; function will know
110 * how to use it
111 * This is a generic API to submit something to
112 * the queue. The callback function will depend
113 * on what operation is to be done
114 */
115 static int sep_submit_work(struct workqueue_struct *work_queue,
116 void (*funct)(void *),
117 void *data)
118 {
119 struct sep_work_struct *sep_work;
120 int result;
121
122 sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);
123
124 if (sep_work == NULL) {
125 pr_debug("sep crypto: cant allocate work structure\n");
126 return -ENOMEM;
127 }
128
129 sep_work->callback = funct;
130 sep_work->data = data;
131 INIT_WORK(&sep_work->work, sep_do_callback);
132 result = queue_work(work_queue, &sep_work->work);
133 if (!result) {
134 pr_debug("sep_crypto: queue_work failed\n");
135 return -EINVAL;
136 }
137 return 0;
138 }
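/*
 * Illustrative usage (a sketch mirroring the call made later in
 * hash_update_post_op): the dequeuer is the typical callback handed
 * to this helper, e.g.
 *
 *	err = sep_submit_work(sep->workqueue, sep_dequeuer,
 *			      (void *)&sep_queue);
 *	if (err)
 *		pr_debug("sep_crypto: could not schedule dequeuer\n");
 *
 * The callback then runs in process context via sep_do_callback(),
 * which frees the sep_work_struct after invoking it.
 */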
139
140 /**
141 * sep_alloc_sg_buf -
142 * @sep: pointer to struct sep_device
143 * @size: total size of area
144 * @block_size: minimum size of chunks
145 * each page length is a multiple of this size
146 * @returns: pointer to struct scatterlist for new
147 * buffer
148 **/
149 static struct scatterlist *sep_alloc_sg_buf(
150 struct sep_device *sep,
151 size_t size,
152 size_t block_size)
153 {
154 u32 nbr_pages;
155 u32 ct1;
156 void *buf;
157 size_t current_size;
158 size_t real_page_size;
159
160 struct scatterlist *sg, *sg_temp;
161
162 if (size == 0)
163 return NULL;
164
165 dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");
166
167 current_size = 0;
168 nbr_pages = 0;
169 real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
170 /**
171 * The size of each page must be a multiple of the operation
172 * block size; increment by the adjusted page size until
173 * the total size is reached, which gives the number of
174 * pages
175 */
176 while (current_size < size) {
177 current_size += real_page_size;
178 nbr_pages += 1;
179 }
180
181 sg = kmalloc_array(nbr_pages, sizeof(struct scatterlist), GFP_ATOMIC);
182 if (!sg)
183 return NULL;
184
185 sg_init_table(sg, nbr_pages);
186
187 current_size = 0;
188 sg_temp = sg;
189 for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
190 buf = (void *)get_zeroed_page(GFP_ATOMIC);
191 if (!buf) {
192 dev_warn(&sep->pdev->dev,
193 "Cannot allocate page for new buffer\n");
194 kfree(sg);
195 return NULL;
196 }
197
198 sg_set_buf(sg_temp, buf, real_page_size);
199 if ((size - current_size) > real_page_size) {
200 sg_temp->length = real_page_size;
201 current_size += real_page_size;
202 } else {
203 sg_temp->length = (size - current_size);
204 current_size = size;
205 }
206 sg_temp = sg_next(sg);
207 }
208 return sg;
209 }
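/*
 * Worked example (assuming PAGE_SIZE == 4096): for size == 5000 and
 * block_size == 16, real_page_size stays 4096, so two pages are
 * allocated; the first scatterlist entry is 4096 bytes long and the
 * second is trimmed to the remaining 904 bytes.
 */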
210
211 /**
212 * sep_free_sg_buf -
213 * @sg: pointer to struct scatterlist; points to area to free
214 */
215 static void sep_free_sg_buf(struct scatterlist *sg)
216 {
217 struct scatterlist *sg_temp = sg;
218 while (sg_temp) {
219 free_page((unsigned long)sg_virt(sg_temp));
220 sg_temp = sg_next(sg_temp);
221 }
222 kfree(sg);
223 }
224
225 /**
226 * sep_copy_sg -
227 * @sep: pointer to struct sep_device
228 * @sg_src: pointer to struct scatterlist for source
229 * @sg_dst: pointer to struct scatterlist for destination
230 * @size: size (in bytes) of data to copy
231 *
232 * Copy data from one scatterlist to another; both must
233 * be the same size
234 */
235 static void sep_copy_sg(
236 struct sep_device *sep,
237 struct scatterlist *sg_src,
238 struct scatterlist *sg_dst,
239 size_t size)
240 {
241 u32 seg_size;
242 u32 in_offset, out_offset;
243
244 u32 count = 0;
245 struct scatterlist *sg_src_tmp = sg_src;
246 struct scatterlist *sg_dst_tmp = sg_dst;
247 in_offset = 0;
248 out_offset = 0;
249
250 dev_dbg(&sep->pdev->dev, "sep copy sg\n");
251
252 if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
253 return;
254
255 dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");
256
257 while (count < size) {
258 if ((sg_src_tmp->length - in_offset) >
259 (sg_dst_tmp->length - out_offset))
260 seg_size = sg_dst_tmp->length - out_offset;
261 else
262 seg_size = sg_src_tmp->length - in_offset;
263
264 if (seg_size > (size - count))
265 seg_size = size - count;
266
267 memcpy(sg_virt(sg_dst_tmp) + out_offset,
268 sg_virt(sg_src_tmp) + in_offset,
269 seg_size);
270
271 in_offset += seg_size;
272 out_offset += seg_size;
273 count += seg_size;
274
275 if (in_offset >= sg_src_tmp->length) {
276 sg_src_tmp = sg_next(sg_src_tmp);
277 in_offset = 0;
278 }
279
280 if (out_offset >= sg_dst_tmp->length) {
281 sg_dst_tmp = sg_next(sg_dst_tmp);
282 out_offset = 0;
283 }
284 }
285 }
286
287 /**
288 * sep_oddball_pages -
289 * @sep: pointer to struct sep_device
290 * @sg: pointer to struct scatterlist - buffer to check
291 * @data_size: total data size
292 * @block_size: minimum block size; pages must be multiples of this size
293 * @do_copy: 1 means do copy, 0 means do not copy
294 * @new_sg: pointer to location to put pointer to new sg area
295 * @returns: 1 if new scatterlist is needed; 0 if not needed;
296 * error value if operation failed
297 *
298 * The SEP device requires all pages to be multiples of the
299 * minimum block size appropriate for the operation
300 * This function checks all pages; if any are oddball sized
301 * (not a multiple of the block size), it creates a new scatterlist.
302 * If the do_copy parameter is set to 1, then a scatter list
303 * copy is performed. The pointer to the new scatterlist is
304 * put into the address supplied by the new_sg parameter; if
305 * no new scatterlist is needed, then a NULL is put into
306 * the location at new_sg.
307 *
308 */
309 static int sep_oddball_pages(
310 struct sep_device *sep,
311 struct scatterlist *sg,
312 size_t data_size,
313 u32 block_size,
314 struct scatterlist **new_sg,
315 u32 do_copy)
316 {
317 struct scatterlist *sg_temp;
318 u32 flag;
319 u32 nbr_pages, page_count;
320
321 dev_dbg(&sep->pdev->dev, "sep oddball\n");
322 if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
323 return 0;
324
325 dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
326 flag = 0;
327 nbr_pages = 0;
328 page_count = 0;
329 sg_temp = sg;
330
331 while (sg_temp) {
332 nbr_pages += 1;
333 sg_temp = sg_next(sg_temp);
334 }
335
336 sg_temp = sg;
337 while ((sg_temp) && (flag == 0)) {
338 page_count += 1;
339 if (sg_temp->length % block_size)
340 flag = 1;
341 else
342 sg_temp = sg_next(sg_temp);
343 }
344
345 /* Do not process if last (or only) page is oddball */
346 if (nbr_pages == page_count)
347 flag = 0;
348
349 if (flag) {
350 dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
351 *new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
352 if (*new_sg == NULL) {
353 dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
354 return -ENOMEM;
355 }
356
357 if (do_copy)
358 sep_copy_sg(sep, sg, *new_sg, data_size);
359
360 return 1;
361 } else {
362 return 0;
363 }
364 }
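/*
 * Example: a source scatterlist of two entries, 100 and 412 bytes,
 * checked against an AES block size of 16 has an oddball first entry
 * (100 % 16 != 0), so a fresh block-aligned scatterlist is allocated
 * and, if do_copy is set, the data is copied into it.  A list whose
 * only misaligned entry is the last one is left untouched.
 */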
365
366 /**
367 * sep_copy_offset_sg -
368 * @sep: pointer to struct sep_device;
369 * @sg: pointer to struct scatterlist
370 * @offset: offset into scatterlist memory
371 * @dst: place to put data
372 * @len: length of data
373 * @returns: number of bytes copied
374 *
375 * This copies data from scatterlist buffer
376 * offset from beginning - it is needed for
377 * handling tail data in hash
378 */
379 static size_t sep_copy_offset_sg(
380 struct sep_device *sep,
381 struct scatterlist *sg,
382 u32 offset,
383 void *dst,
384 u32 len)
385 {
386 size_t page_start;
387 size_t page_end;
388 size_t offset_within_page;
389 size_t length_within_page;
390 size_t length_remaining;
391 size_t current_offset;
392
393 /* Find which page is beginning of segment */
394 page_start = 0;
395 page_end = sg->length;
396 while ((sg) && (offset > page_end)) {
397 page_start += sg->length;
398 sg = sg_next(sg);
399 if (sg)
400 page_end += sg->length;
401 }
402
403 if (sg == NULL)
404 return -ENOMEM;
405
406 offset_within_page = offset - page_start;
407 if ((sg->length - offset_within_page) >= len) {
408 /* All within this page */
409 memcpy(dst, sg_virt(sg) + offset_within_page, len);
410 return len;
411 } else {
412 /* Scattered multiple pages */
413 current_offset = 0;
414 length_remaining = len;
415 while ((sg) && (current_offset < len)) {
416 length_within_page = sg->length - offset_within_page;
417 if (length_within_page >= length_remaining) {
418 memcpy(dst+current_offset,
419 sg_virt(sg) + offset_within_page,
420 length_remaining);
421 length_remaining = 0;
422 current_offset = len;
423 } else {
424 memcpy(dst+current_offset,
425 sg_virt(sg) + offset_within_page,
426 length_within_page);
427 length_remaining -= length_within_page;
428 current_offset += length_within_page;
429 offset_within_page = 0;
430 sg = sg_next(sg);
431 }
432 }
433
434 if (sg == NULL)
435 return -ENOMEM;
436 }
437 return len;
438 }
439
440 /**
441 * partial_overlap -
442 * @src_ptr: source pointer
443 * @dst_ptr: destination pointer
444 * @nbytes: number of bytes
445 * @returns: 0 for success; -1 for failure
446 * We cannot have any partial overlap. Total overlap
447 * where src is the same as dst is okay
448 */
449 static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
450 {
451 /* Check for partial overlap */
452 if (src_ptr != dst_ptr) {
453 if (src_ptr < dst_ptr) {
454 if ((src_ptr + nbytes) > dst_ptr)
455 return -EINVAL;
456 } else {
457 if ((dst_ptr + nbytes) > src_ptr)
458 return -EINVAL;
459 }
460 }
461
462 return 0;
463 }
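/*
 * Example: src_ptr == 0x1000, dst_ptr == 0x1008, nbytes == 16 is a
 * partial overlap (src + 16 runs past dst) and returns -EINVAL,
 * whereas src_ptr == dst_ptr (in-place operation) or fully disjoint
 * buffers return 0.
 */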
464
465 /* Debug - prints only if DEBUG is defined */
466 static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
467
468 {
469 unsigned char *cptr;
470 struct sep_aes_internal_context *aes_internal;
471 struct sep_des_internal_context *des_internal;
472 int ct1;
473
474 struct this_task_ctx *ta_ctx;
475 struct crypto_ablkcipher *tfm;
476 struct sep_system_ctx *sctx;
477
478 ta_ctx = ablkcipher_request_ctx(req);
479 tfm = crypto_ablkcipher_reqtfm(req);
480 sctx = crypto_ablkcipher_ctx(tfm);
481
482 dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
483 if ((ta_ctx->current_request == DES_CBC) &&
484 (ta_ctx->des_opmode == SEP_DES_CBC)) {
485
486 des_internal = (struct sep_des_internal_context *)
487 sctx->des_private_ctx.ctx_buf;
488 /* print vendor */
489 dev_dbg(&ta_ctx->sep_used->pdev->dev,
490 "sep - vendor iv for DES\n");
491 cptr = (unsigned char *)des_internal->iv_context;
492 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
493 dev_dbg(&ta_ctx->sep_used->pdev->dev,
494 "%02x\n", *(cptr + ct1));
495
496 /* print walk */
497 dev_dbg(&ta_ctx->sep_used->pdev->dev,
498 "sep - walk from kernel crypto iv for DES\n");
499 cptr = (unsigned char *)ta_ctx->walk.iv;
500 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
501 dev_dbg(&ta_ctx->sep_used->pdev->dev,
502 "%02x\n", *(cptr + ct1));
503 } else if ((ta_ctx->current_request == AES_CBC) &&
504 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
505
506 aes_internal = (struct sep_aes_internal_context *)
507 sctx->aes_private_ctx.cbuff;
508 /* print vendor */
509 dev_dbg(&ta_ctx->sep_used->pdev->dev,
510 "sep - vendor iv for AES\n");
511 cptr = (unsigned char *)aes_internal->aes_ctx_iv;
512 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
513 dev_dbg(&ta_ctx->sep_used->pdev->dev,
514 "%02x\n", *(cptr + ct1));
515
516 /* print walk */
517 dev_dbg(&ta_ctx->sep_used->pdev->dev,
518 "sep - walk from kernel crypto iv for AES\n");
519 cptr = (unsigned char *)ta_ctx->walk.iv;
520 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
521 dev_dbg(&ta_ctx->sep_used->pdev->dev,
522 "%02x\n", *(cptr + ct1));
523 }
524 }
525
526 /**
527 * RFC2451: Weak key check
528 * Returns: 1 (weak), 0 (not weak)
529 */
530 static int sep_weak_key(const u8 *key, unsigned int keylen)
531 {
532 static const u8 parity[] = {
533 8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
534 0, 8, 8, 0, 8, 0, 0, 8, 8,
535 0, 0, 8, 0, 8, 8, 3,
536 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
537 8, 0, 0, 8, 0, 8, 8, 0, 0,
538 8, 8, 0, 8, 0, 0, 8,
539 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
540 8, 0, 0, 8, 0, 8, 8, 0, 0,
541 8, 8, 0, 8, 0, 0, 8,
542 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
543 0, 8, 8, 0, 8, 0, 0, 8, 8,
544 0, 0, 8, 0, 8, 8, 0,
545 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
546 8, 0, 0, 8, 0, 8, 8, 0, 0,
547 8, 8, 0, 8, 0, 0, 8,
548 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
549 0, 8, 8, 0, 8, 0, 0, 8, 8,
550 0, 0, 8, 0, 8, 8, 0,
551 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
552 0, 8, 8, 0, 8, 0, 0, 8, 8,
553 0, 0, 8, 0, 8, 8, 0,
554 4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
555 8, 5, 0, 8, 0, 8, 8, 0, 0,
556 8, 8, 0, 8, 0, 6, 8,
557 };
558
559 u32 n, w;
560
561 n = parity[key[0]]; n <<= 4;
562 n |= parity[key[1]]; n <<= 4;
563 n |= parity[key[2]]; n <<= 4;
564 n |= parity[key[3]]; n <<= 4;
565 n |= parity[key[4]]; n <<= 4;
566 n |= parity[key[5]]; n <<= 4;
567 n |= parity[key[6]]; n <<= 4;
568 n |= parity[key[7]];
569 w = 0x88888888L;
570
571 /* 1 in 10^10 keys passes this test */
572 if (!((n - (w >> 3)) & w)) {
573 if (n < 0x41415151) {
574 if (n < 0x31312121) {
575 if (n < 0x14141515) {
576 /* 01 01 01 01 01 01 01 01 */
577 if (n == 0x11111111)
578 goto weak;
579 /* 01 1F 01 1F 01 0E 01 0E */
580 if (n == 0x13131212)
581 goto weak;
582 } else {
583 /* 01 E0 01 E0 01 F1 01 F1 */
584 if (n == 0x14141515)
585 goto weak;
586 /* 01 FE 01 FE 01 FE 01 FE */
587 if (n == 0x16161616)
588 goto weak;
589 }
590 } else {
591 if (n < 0x34342525) {
592 /* 1F 01 1F 01 0E 01 0E 01 */
593 if (n == 0x31312121)
594 goto weak;
595 /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
596 if (n == 0x33332222)
597 goto weak;
598 } else {
599 /* 1F E0 1F E0 0E F1 0E F1 */
600 if (n == 0x34342525)
601 goto weak;
602 /* 1F FE 1F FE 0E FE 0E FE */
603 if (n == 0x36362626)
604 goto weak;
605 }
606 }
607 } else {
608 if (n < 0x61616161) {
609 if (n < 0x44445555) {
610 /* E0 01 E0 01 F1 01 F1 01 */
611 if (n == 0x41415151)
612 goto weak;
613 /* E0 1F E0 1F F1 0E F1 0E */
614 if (n == 0x43435252)
615 goto weak;
616 } else {
617 /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
618 if (n == 0x44445555)
619 goto weak;
620 /* E0 FE E0 FE F1 FE F1 FE */
621 if (n == 0x46465656)
622 goto weak;
623 }
624 } else {
625 if (n < 0x64646565) {
626 /* FE 01 FE 01 FE 01 FE 01 */
627 if (n == 0x61616161)
628 goto weak;
629 /* FE 1F FE 1F FE 0E FE 0E */
630 if (n == 0x63636262)
631 goto weak;
632 } else {
633 /* FE E0 FE E0 FE F1 FE F1 */
634 if (n == 0x64646565)
635 goto weak;
636 /* FE FE FE FE FE FE FE FE */
637 if (n == 0x66666666)
638 goto weak;
639 }
640 }
641 }
642 }
643 return 0;
644 weak:
645 return 1;
646 }
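/*
 * How the check works: each of the eight key bytes indexes the
 * parity[] table to yield a 4-bit code, and the codes are packed
 * most-significant first into the 32-bit word n.  For the classic
 * weak key 01 01 01 01 01 01 01 01, parity[0x01] == 1 for every
 * byte, so n == 0x11111111 and the key is reported weak.
 */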
647 /**
648 * sep_sg_nents
649 */
650 static u32 sep_sg_nents(struct scatterlist *sg)
651 {
652 u32 ct1 = 0;
653
654 while (sg) {
655 ct1 += 1;
656 sg = sg_next(sg);
657 }
658
659 return ct1;
660 }
661
662 /**
663 * sep_start_msg -
664 * @ta_ctx: pointer to struct this_task_ctx
665 * @returns: offset to place for the next word in the message
666 * Set up pointer in message pool for new message
667 */
668 static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
669 {
670 u32 *word_ptr;
671
672 ta_ctx->msg_len_words = 2;
673 ta_ctx->msgptr = ta_ctx->msg;
674 memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
675 ta_ctx->msgptr += sizeof(u32) * 2;
676 word_ptr = (u32 *)ta_ctx->msgptr;
677 *word_ptr = SEP_START_MSG_TOKEN;
678 return sizeof(u32) * 2;
679 }
680
681 /**
682 * sep_end_msg -
683 * @ta_ctx: pointer to struct this_task_ctx
684 * @msg_offset: current message offset
685 *
686 * End the message: record the message length (in words) after the
687 * token and write the (currently zero) CRC at the end of the message
688 */
689 static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
690 {
691 u32 *word_ptr;
692 /* Msg size goes into msg after token */
693 ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
694 word_ptr = (u32 *)ta_ctx->msgptr;
695 word_ptr += 1;
696 *word_ptr = ta_ctx->msg_len_words;
697
698 /* CRC (currently 0) goes at end of msg */
699 word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
700 *word_ptr = 0;
701 }
702
703 /**
704 * sep_start_inbound_msg -
705 * @ta_ctx: pointer to struct this_task_ctx
706 * @msg_offset: offset to place for the next word in the message
707 * @returns: 0 for success; error value for failure
708 * Set up pointer in message pool for inbound message
709 */
710 static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
711 {
712 u32 *word_ptr;
713 u32 token;
714 u32 error = SEP_OK;
715
716 *msg_offset = sizeof(u32) * 2;
717 word_ptr = (u32 *)ta_ctx->msgptr;
718 token = *word_ptr;
719 ta_ctx->msg_len_words = *(word_ptr + 1);
720
721 if (token != SEP_START_MSG_TOKEN) {
722 error = SEP_INVALID_START;
723 goto end_function;
724 }
725
726 end_function:
727
728 return error;
729 }
730
731 /**
732 * sep_write_msg -
733 * @ta_ctx: pointer to struct this_task_ctx
734 * @in_addr: pointer to start of parameter
735 * @size: size of parameter to copy (in bytes)
736 * @max_size: amount to advance the offset; the SEP message is in word sizes
737 * @msg_offset: pointer to current offset (is updated)
738 * @byte_array: flag to indicate whether endianness must be changed
739 * Copies data into the message area from caller
740 */
741 static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
742 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
743 {
744 u32 *word_ptr;
745 void *void_ptr;
746
747 void_ptr = ta_ctx->msgptr + *msg_offset;
748 word_ptr = (u32 *)void_ptr;
749 memcpy(void_ptr, in_addr, size);
750 *msg_offset += max_size;
751
752 /* Do we need to manipulate endian? */
753 if (byte_array) {
754 u32 i;
755
756 for (i = 0; i < ((size + 3) / 4); i += 1)
757 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
758 }
759 }
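/*
 * Example: the 16-byte AES IV written by sep_crypto_send_key() is
 * copied and then each of its four 32-bit words is byte-swapped in
 * place via CHG_ENDIAN.  msg_offset always advances by max_size, so
 * a shorter parameter (e.g. the 8-byte DES IV in a 16-byte slot)
 * still leaves the following fields word-aligned.
 */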
760
761 /**
762 * sep_make_header
763 * @ta_ctx: pointer to struct this_task_ctx
764 * @msg_offset: pointer to current offset (is updated)
765 * @op_code: op code to put into message
766 * Puts op code into message and updates offset
767 */
768 static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
769 u32 op_code)
770 {
771 u32 *word_ptr;
772
773 *msg_offset = sep_start_msg(ta_ctx);
774 word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
775 *word_ptr = op_code;
776 *msg_offset += sizeof(u32);
777 }
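/*
 * Resulting layout, relative to ta_ctx->msgptr (which sep_start_msg
 * points two words past the start of ta_ctx->msg):
 *
 *	word 0:  SEP_START_MSG_TOKEN   (sep_start_msg)
 *	word 1:  length in words       (filled in later by sep_end_msg)
 *	word 2:  op code               (written here)
 *	word 3+: parameters            (added via sep_write_msg)
 *	last:    CRC, currently zero   (sep_end_msg)
 *
 * msg_offset is a byte offset from msgptr and is 3 * sizeof(u32)
 * after this call.
 */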
778
779
780
781 /**
782 * sep_read_msg -
783 * @ta_ctx: pointer to struct this_task_ctx
784 * @in_addr: pointer to start of parameter
785 * @size: size of parameter to copy (in bytes)
786 * @max_size: amount to advance the offset; the SEP message is in word sizes
787 * @msg_offset: pointer to current offset (is updated)
788 * @byte_array: flag to indicate whether endianness must be changed
789 * Copies data out of the message area to caller
790 */
791 static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
792 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
793 {
794 u32 *word_ptr;
795 void *void_ptr;
796
797 void_ptr = ta_ctx->msgptr + *msg_offset;
798 word_ptr = (u32 *)void_ptr;
799
800 /* Do we need to manipulate endian? */
801 if (byte_array) {
802 u32 i;
803
804 for (i = 0; i < ((size + 3) / 4); i += 1)
805 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
806 }
807
808 memcpy(in_addr, void_ptr, size);
809 *msg_offset += max_size;
810 }
811
812 /**
813 * sep_verify_op -
814 * @ta_ctx: pointer to struct this_task_ctx
815 * @op_code: expected op_code
816 * @msg_offset: pointer to current offset (is updated)
817 * @returns: 0 for success; error for failure
818 */
819 static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
820 u32 *msg_offset)
821 {
822 u32 error;
823 u32 in_ary[2];
824
825 struct sep_device *sep = ta_ctx->sep_used;
826
827 dev_dbg(&sep->pdev->dev, "dumping return message\n");
828 error = sep_start_inbound_msg(ta_ctx, msg_offset);
829 if (error) {
830 dev_warn(&sep->pdev->dev,
831 "sep_start_inbound_msg error\n");
832 return error;
833 }
834
835 sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
836 msg_offset, 0);
837
838 if (in_ary[0] != op_code) {
839 dev_warn(&sep->pdev->dev,
840 "sep got back wrong opcode\n");
841 dev_warn(&sep->pdev->dev,
842 "got back %x; expected %x\n",
843 in_ary[0], op_code);
844 return SEP_WRONG_OPCODE;
845 }
846
847 if (in_ary[1] != SEP_OK) {
848 dev_warn(&sep->pdev->dev,
849 "sep execution error\n");
850 dev_warn(&sep->pdev->dev,
851 "got back %x; expected %x\n",
852 in_ary[1], SEP_OK);
853 return in_ary[1];
854 }
855
856 return 0;
857 }
858
859 /**
860 * sep_read_context -
861 * @ta_ctx: pointer to struct this_task_ctx
862 * @msg_offset: point to current place in SEP msg; is updated
863 * @dst: pointer to place to put the context
864 * @len: size of the context structure (differs for crypto/hash)
865 * This function reads the context from the msg area
866 * There is a special way the vendor needs to have the maximum
867 * length calculated so that the msg_offset is updated properly;
868 * it skips over some words in the msg area depending on the size
869 * of the context
870 */
871 static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
872 void *dst, u32 len)
873 {
874 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
875
876 sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
877 }
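/*
 * Example: a 103-byte context is read with max_length rounded up to
 * 104 bytes, so msg_offset still lands on a word boundary for the
 * next field in the message.
 */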
878
879 /**
880 * sep_write_context -
881 * @ta_ctx: pointer to struct this_task_ctx
882 * @msg_offset: point to current place in SEP msg; is updated
883 * @src: pointer to the current context
884 * @len: size of the context structure (differs for crypto/hash)
885 * This function writes the context to the msg area
886 * There is a special way the vendor needs to have the maximum
887 * length calculated so that the msg_offset is updated properly;
888 * it skips over some words in the msg area depending on the size
889 * of the context
890 */
891 static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
892 void *src, u32 len)
893 {
894 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
895
896 sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
897 }
898
899 /**
900 * sep_clear_out -
901 * @ta_ctx: pointer to struct this_task_ctx
902 * Clear out crypto related values in sep device structure
903 * to enable device to be used by anyone; either kernel
904 * crypto or userspace app via middleware
905 */
906 static void sep_clear_out(struct this_task_ctx *ta_ctx)
907 {
908 if (ta_ctx->src_sg_hold) {
909 sep_free_sg_buf(ta_ctx->src_sg_hold);
910 ta_ctx->src_sg_hold = NULL;
911 }
912
913 if (ta_ctx->dst_sg_hold) {
914 sep_free_sg_buf(ta_ctx->dst_sg_hold);
915 ta_ctx->dst_sg_hold = NULL;
916 }
917
918 ta_ctx->src_sg = NULL;
919 ta_ctx->dst_sg = NULL;
920
921 sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);
922
923 if (ta_ctx->i_own_sep) {
924 /**
925 * The following unlocks the sep and makes it available
926 * to any other application
927 * First, null out crypto entries in sep before releasing it
928 */
929 ta_ctx->sep_used->current_hash_req = NULL;
930 ta_ctx->sep_used->current_cypher_req = NULL;
931 ta_ctx->sep_used->current_request = 0;
932 ta_ctx->sep_used->current_hash_stage = 0;
933 ta_ctx->sep_used->ta_ctx = NULL;
934 ta_ctx->sep_used->in_kernel = 0;
935
936 ta_ctx->call_status.status = 0;
937
938 /* Remove anything confidential */
939 memset(ta_ctx->sep_used->shared_addr, 0,
940 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
941
942 sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);
943
944 #ifdef SEP_ENABLE_RUNTIME_PM
945 ta_ctx->sep_used->in_use = 0;
946 pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
947 pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
948 #endif
949
950 clear_bit(SEP_WORKING_LOCK_BIT,
951 &ta_ctx->sep_used->in_use_flags);
952 ta_ctx->sep_used->pid_doing_transaction = 0;
953
954 dev_dbg(&ta_ctx->sep_used->pdev->dev,
955 "[PID%d] waking up next transaction\n",
956 current->pid);
957
958 clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
959 &ta_ctx->sep_used->in_use_flags);
960 wake_up(&ta_ctx->sep_used->event_transactions);
961
962 ta_ctx->i_own_sep = 0;
963 }
964 }
965
966 /**
967 * Release crypto infrastructure from EINPROGRESS and
968 * clear sep_dev so that SEP is available to anyone
969 */
970 static void sep_crypto_release(struct sep_system_ctx *sctx,
971 struct this_task_ctx *ta_ctx, u32 error)
972 {
973 struct ahash_request *hash_req = ta_ctx->current_hash_req;
974 struct ablkcipher_request *cypher_req =
975 ta_ctx->current_cypher_req;
976 struct sep_device *sep = ta_ctx->sep_used;
977
978 sep_clear_out(ta_ctx);
979
980 /**
981 * This may not yet exist depending when we
982 * chose to bail out. If it does exist, set
983 * it to 1
984 */
985 if (ta_ctx->are_we_done_yet != NULL)
986 *ta_ctx->are_we_done_yet = 1;
987
988 if (cypher_req != NULL) {
989 if ((sctx->key_sent == 1) ||
990 ((error != 0) && (error != -EINPROGRESS))) {
991 if (cypher_req->base.complete == NULL) {
992 dev_dbg(&sep->pdev->dev,
993 "release is null for cypher!");
994 } else {
995 cypher_req->base.complete(
996 &cypher_req->base, error);
997 }
998 }
999 }
1000
1001 if (hash_req != NULL) {
1002 if (hash_req->base.complete == NULL) {
1003 dev_dbg(&sep->pdev->dev,
1004 "release is null for hash!");
1005 } else {
1006 hash_req->base.complete(
1007 &hash_req->base, error);
1008 }
1009 }
1010 }
1011
1012 /**
1013 * This is where we grab the sep itself and tell it to do something.
1014 * It will sleep if the sep is currently busy
1015 * and it will return 0 if sep is now ours; error value if there
1016 * were problems
1017 */
1018 static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
1019 {
1020 struct sep_device *sep = ta_ctx->sep_used;
1021 int result;
1022 struct sep_msgarea_hdr *my_msg_header;
1023
1024 my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;
1025
1026 /* add to status queue */
1027 ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
1028 ta_ctx->nbytes, current->pid,
1029 current->comm, sizeof(current->comm));
1030
1031 if (!ta_ctx->queue_elem) {
1032 dev_dbg(&sep->pdev->dev,
1033 "[PID%d] updating queue status error\n", current->pid);
1034 return -EINVAL;
1035 }
1036
1037 /* get the device; this can sleep */
1038 result = sep_wait_transaction(sep);
1039 if (result)
1040 return result;
1041
1042 if (sep_dev->power_save_setup == 1)
1043 pm_runtime_get_sync(&sep_dev->pdev->dev);
1044
1045 /* Copy in the message */
1046 memcpy(sep->shared_addr, ta_ctx->msg,
1047 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1048
1049 /* Copy in the dcb information if there is any */
1050 if (ta_ctx->dcb_region) {
1051 result = sep_activate_dcb_dmatables_context(sep,
1052 &ta_ctx->dcb_region, &ta_ctx->dmatables_region,
1053 ta_ctx->dma_ctx);
1054 if (result)
1055 return result;
1056 }
1057
1058 /* Mark the device so we know how to finish the job in the tasklet */
1059 if (ta_ctx->current_hash_req)
1060 sep->current_hash_req = ta_ctx->current_hash_req;
1061 else
1062 sep->current_cypher_req = ta_ctx->current_cypher_req;
1063
1064 sep->current_request = ta_ctx->current_request;
1065 sep->current_hash_stage = ta_ctx->current_hash_stage;
1066 sep->ta_ctx = ta_ctx;
1067 sep->in_kernel = 1;
1068 ta_ctx->i_own_sep = 1;
1069
1070 /* need to set bit first to avoid race condition with interrupt */
1071 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);
1072
1073 result = sep_send_command_handler(sep);
1074
1075 dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
1076 current->pid);
1077
1078 if (!result)
1079 dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
1080 current->pid);
1081 else {
1082 dev_dbg(&sep->pdev->dev, "[PID%d]: can't send command\n",
1083 current->pid);
1084 clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1085 &ta_ctx->call_status.status);
1086 }
1087
1088 return result;
1089 }
1090
1091 /**
1092 * This function sets things up for a crypto data block process
1093 * This does all preparation, but does not try to grab the
1094 * sep
1095 * @req: pointer to struct ablkcipher_request
1096 * returns: 0 if all went well, non zero if error
1097 */
1098 static int sep_crypto_block_data(struct ablkcipher_request *req)
1099 {
1100
1101 int int_error;
1102 u32 msg_offset;
1103 static u32 msg[10];
1104 void *src_ptr;
1105 void *dst_ptr;
1106
1107 static char small_buf[100];
1108 ssize_t copy_result;
1109 int result;
1110
1111 struct scatterlist *new_sg;
1112 struct this_task_ctx *ta_ctx;
1113 struct crypto_ablkcipher *tfm;
1114 struct sep_system_ctx *sctx;
1115
1116 struct sep_des_internal_context *des_internal;
1117 struct sep_aes_internal_context *aes_internal;
1118
1119 ta_ctx = ablkcipher_request_ctx(req);
1120 tfm = crypto_ablkcipher_reqtfm(req);
1121 sctx = crypto_ablkcipher_ctx(tfm);
1122
1123 /* start the walk on scatterlists */
1124 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1125 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
1126 req->nbytes);
1127
1128 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1129 if (int_error) {
1130 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1131 int_error);
1132 return -ENOMEM;
1133 }
1134
1135 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1136 "crypto block: src is %lx dst is %lx\n",
1137 (unsigned long)req->src, (unsigned long)req->dst);
1138
1139 /* Make sure all pages are even block */
1140 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
1141 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);
1142
1143 if (int_error < 0) {
1144 dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
1145 return int_error;
1146 } else if (int_error == 1) {
1147 ta_ctx->src_sg = new_sg;
1148 ta_ctx->src_sg_hold = new_sg;
1149 } else {
1150 ta_ctx->src_sg = req->src;
1151 ta_ctx->src_sg_hold = NULL;
1152 }
1153
1154 int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
1155 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);
1156
1157 if (int_error < 0) {
1158 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1159 int_error);
1160 return int_error;
1161 } else if (int_error == 1) {
1162 ta_ctx->dst_sg = new_sg;
1163 ta_ctx->dst_sg_hold = new_sg;
1164 } else {
1165 ta_ctx->dst_sg = req->dst;
1166 ta_ctx->dst_sg_hold = NULL;
1167 }
1168
1169 /* set nbytes for queue status */
1170 ta_ctx->nbytes = req->nbytes;
1171
1172 /* Key already done; this is for data */
1173 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");
1174
1175 /* check for valid data and proper spacing */
1176 src_ptr = sg_virt(ta_ctx->src_sg);
1177 dst_ptr = sg_virt(ta_ctx->dst_sg);
1178
1179 if (!src_ptr || !dst_ptr ||
1180 (ta_ctx->current_cypher_req->nbytes %
1181 crypto_ablkcipher_blocksize(tfm))) {
1182
1183 dev_warn(&ta_ctx->sep_used->pdev->dev,
1184 "cipher block size odd\n");
1185 dev_warn(&ta_ctx->sep_used->pdev->dev,
1186 "cipher block size is %x\n",
1187 crypto_ablkcipher_blocksize(tfm));
1188 dev_warn(&ta_ctx->sep_used->pdev->dev,
1189 "cipher data size is %x\n",
1190 ta_ctx->current_cypher_req->nbytes);
1191 return -EINVAL;
1192 }
1193
1194 if (partial_overlap(src_ptr, dst_ptr,
1195 ta_ctx->current_cypher_req->nbytes)) {
1196 dev_warn(&ta_ctx->sep_used->pdev->dev,
1197 "block partial overlap\n");
1198 return -EINVAL;
1199 }
1200
1201 /* Put together the message */
1202 sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);
1203
1204 /* If des, and size is 1 block, put directly in msg */
1205 if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
1206 (req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
1207
1208 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1209 "writing out one block des\n");
1210
1211 copy_result = sg_copy_to_buffer(
1212 ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
1213 small_buf, crypto_ablkcipher_blocksize(tfm));
1214
1215 if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
1216 dev_warn(&ta_ctx->sep_used->pdev->dev,
1217 "des block copy failed\n");
1218 return -ENOMEM;
1219 }
1220
1221 /* Put data into message */
1222 sep_write_msg(ta_ctx, small_buf,
1223 crypto_ablkcipher_blocksize(tfm),
1224 crypto_ablkcipher_blocksize(tfm) * 2,
1225 &msg_offset, 1);
1226
1227 /* Put size into message */
1228 sep_write_msg(ta_ctx, &req->nbytes,
1229 sizeof(u32), sizeof(u32), &msg_offset, 0);
1230 } else {
1231 /* Otherwise, fill out dma tables */
1232 ta_ctx->dcb_input_data.app_in_address = src_ptr;
1233 ta_ctx->dcb_input_data.data_in_size = req->nbytes;
1234 ta_ctx->dcb_input_data.app_out_address = dst_ptr;
1235 ta_ctx->dcb_input_data.block_size =
1236 crypto_ablkcipher_blocksize(tfm);
1237 ta_ctx->dcb_input_data.tail_block_size = 0;
1238 ta_ctx->dcb_input_data.is_applet = 0;
1239 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
1240 ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;
1241
1242 result = sep_create_dcb_dmatables_context_kernel(
1243 ta_ctx->sep_used,
1244 &ta_ctx->dcb_region,
1245 &ta_ctx->dmatables_region,
1246 &ta_ctx->dma_ctx,
1247 &ta_ctx->dcb_input_data,
1248 1);
1249 if (result) {
1250 dev_warn(&ta_ctx->sep_used->pdev->dev,
1251 "crypto dma table create failed\n");
1252 return -EINVAL;
1253 }
1254
1255 /* Portion of msg is nulled (no data) */
1256 msg[0] = (u32)0;
1257 msg[1] = (u32)0;
1258 msg[2] = (u32)0;
1259 msg[3] = (u32)0;
1260 msg[4] = (u32)0;
1261 sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
1262 sizeof(u32) * 5, &msg_offset, 0);
1263 }
1264
1265 /**
1266 * Before we write the message, we need to overwrite the
1267 * vendor's IV with the one from our own ablkcipher walk
1268 * iv because this is needed for dm-crypt
1269 */
1270 sep_dump_ivs(req, "sending data block to sep\n");
1271 if ((ta_ctx->current_request == DES_CBC) &&
1272 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1273
1274 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1275 "overwrite vendor iv on DES\n");
1276 des_internal = (struct sep_des_internal_context *)
1277 sctx->des_private_ctx.ctx_buf;
1278 memcpy((void *)des_internal->iv_context,
1279 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1280 } else if ((ta_ctx->current_request == AES_CBC) &&
1281 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1282
1283 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1284 "overwrite vendor iv on AES\n");
1285 aes_internal = (struct sep_aes_internal_context *)
1286 sctx->aes_private_ctx.cbuff;
1287 memcpy((void *)aes_internal->aes_ctx_iv,
1288 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1289 }
1290
1291 /* Write context into message */
1292 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1293 sep_write_context(ta_ctx, &msg_offset,
1294 &sctx->des_private_ctx,
1295 sizeof(struct sep_des_private_context));
1296 } else {
1297 sep_write_context(ta_ctx, &msg_offset,
1298 &sctx->aes_private_ctx,
1299 sizeof(struct sep_aes_private_context));
1300 }
1301
1302 /* conclude message */
1303 sep_end_msg(ta_ctx, msg_offset);
1304
1305 /* Parent (caller) is now ready to tell the sep to go ahead */
1306 return 0;
1307 }
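/*
 * Two shapes of the message built above: for a single DES block the
 * data itself travels in the message body (block, then size), while
 * for everything else five zero words are written and the real
 * payload is described by the DCB/DMA tables.  In both cases the
 * cipher context follows and sep_end_msg() closes the message.
 */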
1308
1309
1310 /**
1311 * This function sets things up for a crypto key submit process
1312 * This does all preparation, but does not try to grab the
1313 * sep
1314 * @req: pointer to struct ablkcipher_request
1315 * returns: 0 if all went well, non zero if error
1316 */
1317 static int sep_crypto_send_key(struct ablkcipher_request *req)
1318 {
1319
1320 int int_error;
1321 u32 msg_offset;
1322 static u32 msg[10];
1323
1324 u32 max_length;
1325 struct this_task_ctx *ta_ctx;
1326 struct crypto_ablkcipher *tfm;
1327 struct sep_system_ctx *sctx;
1328
1329 ta_ctx = ablkcipher_request_ctx(req);
1330 tfm = crypto_ablkcipher_reqtfm(req);
1331 sctx = crypto_ablkcipher_ctx(tfm);
1332
1333 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
1334
1335 /* start the walk on scatterlists */
1336 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1337 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1338 "sep crypto block data size of %x\n", req->nbytes);
1339
1340 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1341 if (int_error) {
1342 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1343 int_error);
1344 return -ENOMEM;
1345 }
1346
1347 /* check iv */
1348 if ((ta_ctx->current_request == DES_CBC) &&
1349 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1350 if (!ta_ctx->walk.iv) {
1351 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1352 return -EINVAL;
1353 }
1354
1355 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
1356 }
1357
1358 if ((ta_ctx->current_request == AES_CBC) &&
1359 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1360 if (!ta_ctx->walk.iv) {
1361 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1362 return -EINVAL;
1363 }
1364
1365 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
1366 }
1367
1368 /* put together message to SEP */
1369 /* Start with op code */
1370 sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
1371
1372 /* now deal with IV */
1373 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1374 if (ta_ctx->des_opmode == SEP_DES_CBC) {
1375 sep_write_msg(ta_ctx, ta_ctx->iv,
1376 SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
1377 &msg_offset, 1);
1378 } else {
1379 /* Skip if ECB */
1380 msg_offset += 4 * sizeof(u32);
1381 }
1382 } else {
1383 max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
1384 sizeof(u32)) * sizeof(u32);
1385 if (ta_ctx->aes_opmode == SEP_AES_CBC) {
1386 sep_write_msg(ta_ctx, ta_ctx->iv,
1387 SEP_AES_IV_SIZE_BYTES, max_length,
1388 &msg_offset, 1);
1389 } else {
1390 /* Skip if ECB */
1391 msg_offset += max_length;
1392 }
1393 }
1394
1395 /* load the key */
1396 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1397 sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
1398 sizeof(u32) * 8, sizeof(u32) * 8,
1399 &msg_offset, 1);
1400
1401 msg[0] = (u32)sctx->des_nbr_keys;
1402 msg[1] = (u32)ta_ctx->des_encmode;
1403 msg[2] = (u32)ta_ctx->des_opmode;
1404
1405 sep_write_msg(ta_ctx, (void *)msg,
1406 sizeof(u32) * 3, sizeof(u32) * 3,
1407 &msg_offset, 0);
1408 } else {
1409 sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
1410 sctx->keylen,
1411 SEP_AES_MAX_KEY_SIZE_BYTES,
1412 &msg_offset, 1);
1413
1414 msg[0] = (u32)sctx->aes_key_size;
1415 msg[1] = (u32)ta_ctx->aes_encmode;
1416 msg[2] = (u32)ta_ctx->aes_opmode;
1417 msg[3] = (u32)0; /* Secret key is not used */
1418 sep_write_msg(ta_ctx, (void *)msg,
1419 sizeof(u32) * 4, sizeof(u32) * 4,
1420 &msg_offset, 0);
1421 }
1422
1423 /* conclude message */
1424 sep_end_msg(ta_ctx, msg_offset);
1425
1426 /* Parent (caller) is now ready to tell the sep to go ahead */
1427 return 0;
1428 }
1429
1430
1431 /* This needs to be run as a work queue as it can be put asleep */
1432 static void sep_crypto_block(void *data)
1433 {
1434 unsigned long end_time;
1435
1436 int result;
1437
1438 struct ablkcipher_request *req;
1439 struct this_task_ctx *ta_ctx;
1440 struct crypto_ablkcipher *tfm;
1441 struct sep_system_ctx *sctx;
1442 int are_we_done_yet;
1443
1444 req = (struct ablkcipher_request *)data;
1445 ta_ctx = ablkcipher_request_ctx(req);
1446 tfm = crypto_ablkcipher_reqtfm(req);
1447 sctx = crypto_ablkcipher_ctx(tfm);
1448
1449 ta_ctx->are_we_done_yet = &are_we_done_yet;
1450
1451 pr_debug("sep_crypto_block\n");
1452 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
1453 tfm, sctx, ta_ctx);
1454 pr_debug("key_sent is %d\n", sctx->key_sent);
1455
1456 /* do we need to send the key */
1457 if (sctx->key_sent == 0) {
1458 are_we_done_yet = 0;
1459 result = sep_crypto_send_key(req); /* prep to send key */
1460 if (result != 0) {
1461 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1462 "could not prep key %x\n", result);
1463 sep_crypto_release(sctx, ta_ctx, result);
1464 return;
1465 }
1466
1467 result = sep_crypto_take_sep(ta_ctx);
1468 if (result) {
1469 dev_warn(&ta_ctx->sep_used->pdev->dev,
1470 "sep_crypto_take_sep for key send failed\n");
1471 sep_crypto_release(sctx, ta_ctx, result);
1472 return;
1473 }
1474
1475 /* now we sit and wait up to a fixed time for completion */
1476 end_time = jiffies + (WAIT_TIME * HZ);
1477 while ((time_before(jiffies, end_time)) &&
1478 (are_we_done_yet == 0))
1479 schedule();
1480
1481 /* Done waiting; still not done yet? */
1482 if (are_we_done_yet == 0) {
1483 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1484 "Send key job never got done\n");
1485 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1486 return;
1487 }
1488
1489 /* Set the key sent variable so this can be skipped later */
1490 sctx->key_sent = 1;
1491 }
1492
1493 /* Key sent (or maybe not if we did not have to), now send block */
1494 are_we_done_yet = 0;
1495
1496 result = sep_crypto_block_data(req);
1497
1498 if (result != 0) {
1499 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1500 "could prep not send block %x\n", result);
1501 sep_crypto_release(sctx, ta_ctx, result);
1502 return;
1503 }
1504
1505 result = sep_crypto_take_sep(ta_ctx);
1506 if (result) {
1507 dev_warn(&ta_ctx->sep_used->pdev->dev,
1508 "sep_crypto_take_sep for block send failed\n");
1509 sep_crypto_release(sctx, ta_ctx, result);
1510 return;
1511 }
1512
1513 /* now we sit and wait up to a fixed time for completion */
1514 end_time = jiffies + (WAIT_TIME * HZ);
1515 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
1516 schedule();
1517
1518 /* Done waiting; still not done yet? */
1519 if (are_we_done_yet == 0) {
1520 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1521 "Send block job never got done\n");
1522 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1523 return;
1524 }
1525
1526 /* That's it; entire thing done, get out of queue */
1527
1528 pr_debug("crypto_block leaving\n");
1529 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
1530 }
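/*
 * Summary of the flow above: the first request on a transform sends
 * the key (init opcode), polls via schedule() for up to WAIT_TIME
 * seconds until the interrupt path sets are_we_done_yet, then
 * prepares and sends the data block (block opcode) and waits again.
 * Later requests skip the key phase because sctx->key_sent is 1.
 */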
1531
1532 /**
1533 * Post operation (after interrupt) for crypto block
1534 */
1535 static u32 crypto_post_op(struct sep_device *sep)
1536 {
1537 /* HERE */
1538 u32 u32_error;
1539 u32 msg_offset;
1540
1541 ssize_t copy_result;
1542 static char small_buf[100];
1543
1544 struct ablkcipher_request *req;
1545 struct this_task_ctx *ta_ctx;
1546 struct sep_system_ctx *sctx;
1547 struct crypto_ablkcipher *tfm;
1548
1549 struct sep_des_internal_context *des_internal;
1550 struct sep_aes_internal_context *aes_internal;
1551
1552 if (!sep->current_cypher_req)
1553 return -EINVAL;
1554
1555 /* hold req since we need to submit work after clearing sep */
1556 req = sep->current_cypher_req;
1557
1558 ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
1559 tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
1560 sctx = crypto_ablkcipher_ctx(tfm);
1561
1562 pr_debug("crypto_post op\n");
1563 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1564 sctx->key_sent, tfm, sctx, ta_ctx);
1565
1566 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
1567 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
1568
1569 /* first bring msg from shared area to local area */
1570 memcpy(ta_ctx->msg, sep->shared_addr,
1571 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1572
1573 /* Is this the result of performing init (sending key to SEP)? */
1574 if (sctx->key_sent == 0) {
1575
1576 /* Did SEP do it okay */
1577 u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
1578 &msg_offset);
1579 if (u32_error) {
1580 dev_warn(&ta_ctx->sep_used->pdev->dev,
1581 "aes init error %x\n", u32_error);
1582 sep_crypto_release(sctx, ta_ctx, u32_error);
1583 return u32_error;
1584 }
1585
1586 /* Read Context */
1587 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1588 sep_read_context(ta_ctx, &msg_offset,
1589 &sctx->des_private_ctx,
1590 sizeof(struct sep_des_private_context));
1591 } else {
1592 sep_read_context(ta_ctx, &msg_offset,
1593 &sctx->aes_private_ctx,
1594 sizeof(struct sep_aes_private_context));
1595 }
1596
1597 sep_dump_ivs(req, "after sending key to sep\n");
1598
1599 /* key sent went okay; release sep, and set are_we_done_yet */
1600 sctx->key_sent = 1;
1601 sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
1602
1603 } else {
1604
1605 /**
1606 * This is the result of a block request
1607 */
1608 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1609 "crypto_post_op block response\n");
1610
1611 u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
1612 &msg_offset);
1613
1614 if (u32_error) {
1615 dev_warn(&ta_ctx->sep_used->pdev->dev,
1616 "sep block error %x\n", u32_error);
1617 sep_crypto_release(sctx, ta_ctx, u32_error);
1618 return -EINVAL;
1619 }
1620
1621 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1622
1623 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1624 "post op for DES\n");
1625
1626 /* special case for 1 block des */
1627 if (sep->current_cypher_req->nbytes ==
1628 crypto_ablkcipher_blocksize(tfm)) {
1629
1630 sep_read_msg(ta_ctx, small_buf,
1631 crypto_ablkcipher_blocksize(tfm),
1632 crypto_ablkcipher_blocksize(tfm) * 2,
1633 &msg_offset, 1);
1634
1635 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1636 "reading in block des\n");
1637
1638 copy_result = sg_copy_from_buffer(
1639 ta_ctx->dst_sg,
1640 sep_sg_nents(ta_ctx->dst_sg),
1641 small_buf,
1642 crypto_ablkcipher_blocksize(tfm));
1643
1644 if (copy_result !=
1645 crypto_ablkcipher_blocksize(tfm)) {
1646
1647 dev_warn(&ta_ctx->sep_used->pdev->dev,
1648 "des block copy failed\n");
1649 sep_crypto_release(sctx, ta_ctx,
1650 -ENOMEM);
1651 return -ENOMEM;
1652 }
1653 }
1654
1655 /* Read Context */
1656 sep_read_context(ta_ctx, &msg_offset,
1657 &sctx->des_private_ctx,
1658 sizeof(struct sep_des_private_context));
1659 } else {
1660
1661 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1662 "post op for AES\n");
1663
1664 /* Skip the MAC Output */
1665 msg_offset += (sizeof(u32) * 4);
1666
1667 /* Read Context */
1668 sep_read_context(ta_ctx, &msg_offset,
1669 &sctx->aes_private_ctx,
1670 sizeof(struct sep_aes_private_context));
1671 }
1672
1673 /* Copy to correct sg if this block had oddball pages */
1674 if (ta_ctx->dst_sg_hold)
1675 sep_copy_sg(ta_ctx->sep_used,
1676 ta_ctx->dst_sg,
1677 ta_ctx->current_cypher_req->dst,
1678 ta_ctx->current_cypher_req->nbytes);
1679
1680 /**
1681 * Copy the iv's back to the walk.iv
1682 * This is required for dm_crypt
1683 */
1684 sep_dump_ivs(req, "got data block from sep\n");
1685 if ((ta_ctx->current_request == DES_CBC) &&
1686 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1687
1688 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1689 "returning result iv to walk on DES\n");
1690 des_internal = (struct sep_des_internal_context *)
1691 sctx->des_private_ctx.ctx_buf;
1692 memcpy(ta_ctx->walk.iv,
1693 (void *)des_internal->iv_context,
1694 crypto_ablkcipher_ivsize(tfm));
1695 } else if ((ta_ctx->current_request == AES_CBC) &&
1696 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1697
1698 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1699 "returning result iv to walk on AES\n");
1700 aes_internal = (struct sep_aes_internal_context *)
1701 sctx->aes_private_ctx.cbuff;
1702 memcpy(ta_ctx->walk.iv,
1703 (void *)aes_internal->aes_ctx_iv,
1704 crypto_ablkcipher_ivsize(tfm));
1705 }
1706
1707 /* finished, release everything */
1708 sep_crypto_release(sctx, ta_ctx, 0);
1709 }
1710 pr_debug("crypto_post_op done\n");
1711 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1712 sctx->key_sent, tfm, sctx, ta_ctx);
1713
1714 return 0;
1715 }
1716
1717 static u32 hash_init_post_op(struct sep_device *sep)
1718 {
1719 u32 u32_error;
1720 u32 msg_offset;
1721 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1722 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1723 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1724 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1725 "hash init post op\n");
1726
1727 /* first bring msg from shared area to local area */
1728 memcpy(ta_ctx->msg, sep->shared_addr,
1729 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1730
1731 u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
1732 &msg_offset);
1733
1734 if (u32_error) {
1735 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
1736 u32_error);
1737 sep_crypto_release(sctx, ta_ctx, u32_error);
1738 return u32_error;
1739 }
1740
1741 /* Read Context */
1742 sep_read_context(ta_ctx, &msg_offset,
1743 &sctx->hash_private_ctx,
1744 sizeof(struct sep_hash_private_context));
1745
1746 /* Signal to crypto infrastructure and clear out */
1747 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
1748 sep_crypto_release(sctx, ta_ctx, 0);
1749 return 0;
1750 }
1751
1752 static u32 hash_update_post_op(struct sep_device *sep)
1753 {
1754 u32 u32_error;
1755 u32 msg_offset;
1756 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1757 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1758 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1759 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1760 "hash update post op\n");
1761
1762 /* first bring msg from shared area to local area */
1763 memcpy(ta_ctx->msg, sep->shared_addr,
1764 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1765
1766 u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
1767 &msg_offset);
1768
1769 if (u32_error) {
1770 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash update error %x\n",
1771 u32_error);
1772 sep_crypto_release(sctx, ta_ctx, u32_error);
1773 return u32_error;
1774 }
1775
1776 /* Read Context */
1777 sep_read_context(ta_ctx, &msg_offset,
1778 &sctx->hash_private_ctx,
1779 sizeof(struct sep_hash_private_context));
1780
1781 /**
1782 * Following is only for finup; if we just completed the
1783 * data portion of finup, we now need to kick off the
1784 * finish portion of finup.
1785 */
1786
1787 if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
1788
1789 /* first reset stage to HASH_FINUP_FINISH */
1790 ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
1791
1792 /* now enqueue the finish operation */
1793 spin_lock_irq(&queue_lock);
1794 u32_error = crypto_enqueue_request(&sep_queue,
1795 &ta_ctx->sep_used->current_hash_req->base);
1796 spin_unlock_irq(&queue_lock);
1797
1798 if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
1799 dev_warn(&ta_ctx->sep_used->pdev->dev,
1800 "spe cypher post op cant queue\n");
1801 sep_crypto_release(sctx, ta_ctx, u32_error);
1802 return u32_error;
1803 }
1804
1805 /* schedule the data send */
1806 u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
1807 sep_dequeuer, (void *)&sep_queue);
1808
1809 if (u32_error) {
1810 dev_warn(&ta_ctx->sep_used->pdev->dev,
1811 "cant submit work sep_crypto_block\n");
1812 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1813 return -EINVAL;
1814 }
1815 }
1816
1817 /* Signal to crypto infrastructure and clear out */
1818 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
1819 sep_crypto_release(sctx, ta_ctx, 0);
1820 return 0;
1821 }
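/*
 * Note on finup: when the data half of a finup completes, the code
 * above switches current_hash_stage to HASH_FINUP_FINISH and
 * re-enqueues the same request, so the finish half is driven through
 * sep_finish()/hash_final_post_op() on the next pass.
 */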
1822
1823 static u32 hash_final_post_op(struct sep_device *sep)
1824 {
1825 int max_length;
1826 u32 u32_error;
1827 u32 msg_offset;
1828 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1829 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1830 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1831 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1832 "hash final post op\n");
1833
1834 /* first bring msg from shared area to local area */
1835 memcpy(ta_ctx->msg, sep->shared_addr,
1836 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1837
1838 u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
1839 &msg_offset);
1840
1841 if (u32_error) {
1842 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
1843 u32_error);
1844 sep_crypto_release(sctx, ta_ctx, u32_error);
1845 return u32_error;
1846 }
1847
1848 /* Grab the result */
1849 if (ta_ctx->current_hash_req->result == NULL) {
1850 /* Oops, null buffer; error out here */
1851 dev_warn(&ta_ctx->sep_used->pdev->dev,
1852 "hash finish null buffer\n");
1853 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1854 return -ENOMEM;
1855 }
1856
1857 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1858 sizeof(u32)) * sizeof(u32);
1859
1860 sep_read_msg(ta_ctx,
1861 ta_ctx->current_hash_req->result,
1862 crypto_ahash_digestsize(tfm), max_length,
1863 &msg_offset, 0);
1864
1865 /* Signal to crypto infrastructure and clear out */
1866 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
1867 sep_crypto_release(sctx, ta_ctx, 0);
1868 return 0;
1869 }
1870
1871 static u32 hash_digest_post_op(struct sep_device *sep)
1872 {
1873 int max_length;
1874 u32 u32_error;
1875 u32 msg_offset;
1876 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1877 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1878 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1879 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1880 "hash digest post op\n");
1881
1882 /* first bring msg from shared area to local area */
1883 memcpy(ta_ctx->msg, sep->shared_addr,
1884 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1885
1886 u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
1887 &msg_offset);
1888
1889 if (u32_error) {
1890 dev_warn(&ta_ctx->sep_used->pdev->dev,
1891 "hash digest finish error %x\n", u32_error);
1892
1893 sep_crypto_release(sctx, ta_ctx, u32_error);
1894 return u32_error;
1895 }
1896
1897 /* Grab the result */
1898 if (ta_ctx->current_hash_req->result == NULL) {
1899 /* Oops, null buffer; error out here */
1900 dev_warn(&ta_ctx->sep_used->pdev->dev,
1901 "hash digest finish null buffer\n");
1902 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1903 return -ENOMEM;
1904 }
1905
1906 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1907 sizeof(u32)) * sizeof(u32);
1908
1909 sep_read_msg(ta_ctx,
1910 ta_ctx->current_hash_req->result,
1911 crypto_ahash_digestsize(tfm), max_length,
1912 &msg_offset, 0);
1913
1914 /* Signal to crypto infrastructure and clear out */
1915 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1916 "hash digest finish post op done\n");
1917
1918 sep_crypto_release(sctx, ta_ctx, 0);
1919 return 0;
1920 }
1921
1922 /**
1923 * The sep_finish function is scheduled (via tasklet) by the interrupt
1924 * service routine when the SEP raises an interrupt; it is only ever
1925 * called from that tasklet context.
1926 */
1927 static void sep_finish(unsigned long data)
1928 {
1929 struct sep_device *sep_dev;
1930 int res;
1931
1932 res = 0;
1933
1934 if (data == 0) {
1935 pr_debug("sep_finish called with null data\n");
1936 return;
1937 }
1938
1939 sep_dev = (struct sep_device *)data;
1940 if (sep_dev == NULL) {
1941 pr_debug("sep_finish; sep_dev is NULL\n");
1942 return;
1943 }
1944
1945 if (sep_dev->in_kernel == (u32)0) {
1946 dev_warn(&sep_dev->pdev->dev,
1947 "sep_finish; not in kernel operation\n");
1948 return;
1949 }
1950
1951 /* Did we really do a sep command prior to this? */
1952 if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1953 &sep_dev->ta_ctx->call_status.status)) {
1954
1955 dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
1956 current->pid);
1957 return;
1958 }
1959
1960 if (sep_dev->send_ct != sep_dev->reply_ct) {
1961 dev_warn(&sep_dev->pdev->dev,
1962 "[PID%d] poll; no message came back\n",
1963 current->pid);
1964 return;
1965 }
1966
1967 /* Check for error (In case time ran out) */
1968 if ((res != 0x0) && (res != 0x8)) {
1969 dev_warn(&sep_dev->pdev->dev,
1970 "[PID%d] poll; poll error GPR3 is %x\n",
1971 current->pid, res);
1972 return;
1973 }
1974
1975 /* What kind of interrupt from sep was this? */
1976 res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
1977
1978 dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
1979 current->pid, res);
1980
1981 /* Print request? */
1982 if ((res >> 30) & 0x1) {
1983 dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
1984 current->pid);
1985 dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
1986 current->pid,
1987 (char *)(sep_dev->shared_addr +
1988 SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
1989 return;
1990 }
1991
1992 /* Request for daemon (not currently in POR)? */
1993 if (res >> 31) {
1994 dev_dbg(&sep_dev->pdev->dev,
1995 "[PID%d] sep request; ignoring\n",
1996 current->pid);
1997 return;
1998 }
1999
2000 /* If we got here, then we have a reply to a sep command */
2001
2002 dev_dbg(&sep_dev->pdev->dev,
2003 "[PID%d] sep reply to command; processing request: %x\n",
2004 current->pid, sep_dev->current_request);
2005
2006 switch (sep_dev->current_request) {
2007 case AES_CBC:
2008 case AES_ECB:
2009 case DES_CBC:
2010 case DES_ECB:
2011 res = crypto_post_op(sep_dev);
2012 break;
2013 case SHA1:
2014 case MD5:
2015 case SHA224:
2016 case SHA256:
2017 switch (sep_dev->current_hash_stage) {
2018 case HASH_INIT:
2019 res = hash_init_post_op(sep_dev);
2020 break;
2021 case HASH_UPDATE:
2022 case HASH_FINUP_DATA:
2023 res = hash_update_post_op(sep_dev);
2024 break;
2025 case HASH_FINUP_FINISH:
2026 case HASH_FINISH:
2027 res = hash_final_post_op(sep_dev);
2028 break;
2029 case HASH_DIGEST:
2030 res = hash_digest_post_op(sep_dev);
2031 break;
2032 default:
2033 pr_debug("sep - invalid stage for hash finish\n");
2034 }
2035 break;
2036 default:
2037 pr_debug("sep - invalid request for finish\n");
2038 }
2039
2040 if (res)
2041 pr_debug("sep - finish returned error %x\n", res);
2042 }
2043
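/* Tell the ahash core how much per-request context this driver needs */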
2044 static int sep_hash_cra_init(struct crypto_tfm *tfm)
2045 {
2046 const char *alg_name = crypto_tfm_alg_name(tfm);
2047
2048 pr_debug("sep_hash_cra_init name is %s\n", alg_name);
2049
2050 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2051 sizeof(struct this_task_ctx));
2052 return 0;
2053 }
2054
2055 static void sep_hash_cra_exit(struct crypto_tfm *tfm)
2056 {
2057 pr_debug("sep_hash_cra_exit\n");
2058 }
2059
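/**
 * sep_hash_init - send a hash-init transaction to the SEP
 * @data: pointer to the struct ahash_request being serviced
 * Builds a SEP_HASH_INIT_OPCODE message carrying the requested hash
 * opmode, hands the device to this task and then waits up to
 * WAIT_TIME seconds for ta_ctx->are_we_done_yet to be signalled.
 */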
2060 static void sep_hash_init(void *data)
2061 {
2062 u32 msg_offset;
2063 int result;
2064 struct ahash_request *req;
2065 struct crypto_ahash *tfm;
2066 struct this_task_ctx *ta_ctx;
2067 struct sep_system_ctx *sctx;
2068 unsigned long end_time;
2069 int are_we_done_yet;
2070
2071 req = (struct ahash_request *)data;
2072 tfm = crypto_ahash_reqtfm(req);
2073 sctx = crypto_ahash_ctx(tfm);
2074 ta_ctx = ahash_request_ctx(req);
2075 ta_ctx->sep_used = sep_dev;
2076
2077 ta_ctx->are_we_done_yet = &are_we_done_yet;
2078
2079 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2080 "sep_hash_init\n");
2081 ta_ctx->current_hash_stage = HASH_INIT;
2082 /* opcode and mode */
2083 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
2084 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2085 sizeof(u32), sizeof(u32), &msg_offset, 0);
2086 sep_end_msg(ta_ctx, msg_offset);
2087
2088 are_we_done_yet = 0;
2089 result = sep_crypto_take_sep(ta_ctx);
2090 if (result) {
2091 dev_warn(&ta_ctx->sep_used->pdev->dev,
2092 "sep_hash_init take sep failed\n");
2093 sep_crypto_release(sctx, ta_ctx, -EINVAL);
return;
2094 }
2095
2096 /* now we sit and wait up to a fixed time for completion */
2097 end_time = jiffies + (WAIT_TIME * HZ);
2098 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2099 schedule();
2100
2101 /* Done waiting; still not done yet? */
2102 if (are_we_done_yet == 0) {
2103 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2104 "hash init never got done\n");
2105 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2106 return;
2107 }
2108
2109 }
2110
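/**
 * sep_hash_update - send a hash-update transaction to the SEP
 * @data: pointer to the struct ahash_request being serviced
 * Splits the source scatterlist into whole hash blocks, copies any
 * leading (head) and trailing (tail) partial blocks inline into the
 * message, builds DMA tables for the block-aligned middle portion,
 * sends a SEP_HASH_UPDATE_OPCODE message and then waits up to
 * WAIT_TIME seconds for the transaction to complete.
 */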
2111 static void sep_hash_update(void *data)
2112 {
2113 int int_error;
2114 u32 msg_offset;
2115 u32 len;
2116 struct sep_hash_internal_context *int_ctx;
2117 u32 block_size;
2118 u32 head_len;
2119 u32 tail_len;
2120 int are_we_done_yet;
2121
2122 static u32 msg[10];
2123 static char small_buf[100];
2124 void *src_ptr;
2125 struct scatterlist *new_sg;
2126 ssize_t copy_result;
2127 struct ahash_request *req;
2128 struct crypto_ahash *tfm;
2129 struct this_task_ctx *ta_ctx;
2130 struct sep_system_ctx *sctx;
2131 unsigned long end_time;
2132
2133 req = (struct ahash_request *)data;
2134 tfm = crypto_ahash_reqtfm(req);
2135 sctx = crypto_ahash_ctx(tfm);
2136 ta_ctx = ahash_request_ctx(req);
2137 ta_ctx->sep_used = sep_dev;
2138
2139 ta_ctx->are_we_done_yet = &are_we_done_yet;
2140
2141 /* length for queue status */
2142 ta_ctx->nbytes = req->nbytes;
2143
2144 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2145 "sep_hash_update\n");
2146 ta_ctx->current_hash_stage = HASH_UPDATE;
2147 len = req->nbytes;
2148
2149 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2150 tail_len = req->nbytes % block_size;
2151 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
2152 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2153 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2154
2155 /* Compute header/tail sizes */
2156 int_ctx = (struct sep_hash_internal_context *)&sctx->
2157 hash_private_ctx.internal_context;
2158 head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
2159 tail_len = (req->nbytes - head_len) % block_size;
2160
2161 /* Make sure the data is a whole number of blocks */
2162 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2163 req->nbytes,
2164 block_size, &new_sg, 1);
2165
2166 if (int_error < 0) {
2167 dev_warn(&ta_ctx->sep_used->pdev->dev,
2168 "oddball pages error in hash update\n");
2169 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2170 return;
2171 } else if (int_error == 1) {
2172 ta_ctx->src_sg = new_sg;
2173 ta_ctx->src_sg_hold = new_sg;
2174 } else {
2175 ta_ctx->src_sg = req->src;
2176 ta_ctx->src_sg_hold = NULL;
2177 }
2178
2179 /* Null data is legal; don't dereference a missing scatterlist */
2180 if ((!req->nbytes) || (!ta_ctx->src_sg))
2181 src_ptr = NULL;
2182 else
2183 src_ptr = sg_virt(ta_ctx->src_sg);
2185
2186 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2187 ta_ctx->dcb_input_data.data_in_size =
2188 req->nbytes - (head_len + tail_len);
2189 ta_ctx->dcb_input_data.app_out_address = NULL;
2190 ta_ctx->dcb_input_data.block_size = block_size;
2191 ta_ctx->dcb_input_data.tail_block_size = 0;
2192 ta_ctx->dcb_input_data.is_applet = 0;
2193 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2194 ta_ctx->dcb_input_data.dst_sg = NULL;
2195
2196 int_error = sep_create_dcb_dmatables_context_kernel(
2197 ta_ctx->sep_used,
2198 &ta_ctx->dcb_region,
2199 &ta_ctx->dmatables_region,
2200 &ta_ctx->dma_ctx,
2201 &ta_ctx->dcb_input_data,
2202 1);
2203 if (int_error) {
2204 dev_warn(&ta_ctx->sep_used->pdev->dev,
2205 "hash update dma table create failed\n");
2206 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2207 return;
2208 }
2209
2210 /* Construct message to SEP */
2211 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
2212
2213 msg[0] = (u32)0;
2214 msg[1] = (u32)0;
2215 msg[2] = (u32)0;
2216
2217 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2218 &msg_offset, 0);
2219
2220 /* Handle remainders */
2221
2222 /* Head */
2223 sep_write_msg(ta_ctx, &head_len, sizeof(u32),
2224 sizeof(u32), &msg_offset, 0);
2225
2226 if (head_len) {
2227 copy_result = sg_copy_to_buffer(
2228 req->src,
2229 sep_sg_nents(ta_ctx->src_sg),
2230 small_buf, head_len);
2231
2232 if (copy_result != head_len) {
2233 dev_warn(&ta_ctx->sep_used->pdev->dev,
2234 "sg head copy failure in hash block\n");
2235 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2236 return;
2237 }
2238
2239 sep_write_msg(ta_ctx, small_buf, head_len,
2240 sizeof(u32) * 32, &msg_offset, 1);
2241 } else {
2242 msg_offset += sizeof(u32) * 32;
2243 }
2244
2245 /* Tail */
2246 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2247 sizeof(u32), &msg_offset, 0);
2248
2249 if (tail_len) {
2250 copy_result = sep_copy_offset_sg(
2251 ta_ctx->sep_used,
2252 ta_ctx->src_sg,
2253 req->nbytes - tail_len,
2254 small_buf, tail_len);
2255
2256 if (copy_result != tail_len) {
2257 dev_warn(&ta_ctx->sep_used->pdev->dev,
2258 "sg tail copy failure in hash block\n");
2259 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2260 return;
2261 }
2262
2263 sep_write_msg(ta_ctx, small_buf, tail_len,
2264 sizeof(u32) * 32, &msg_offset, 1);
2265 } else {
2266 msg_offset += sizeof(u32) * 32;
2267 }
2268
2269 /* Context */
2270 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2271 sizeof(struct sep_hash_private_context));
2272
2273 sep_end_msg(ta_ctx, msg_offset);
2274 are_we_done_yet = 0;
2275 int_error = sep_crypto_take_sep(ta_ctx);
2276 if (int_error) {
2277 dev_warn(&ta_ctx->sep_used->pdev->dev,
2278 "sep_hash_update take sep failed\n");
2279 sep_crypto_release(sctx, ta_ctx, -EINVAL);
return;
2280 }
2281
2282 /* now we sit and wait up to a fixed time for completion */
2283 end_time = jiffies + (WAIT_TIME * HZ);
2284 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2285 schedule();
2286
2287 /* Done waiting; still not done yet? */
2288 if (are_we_done_yet == 0) {
2289 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2290 "hash update never got done\n");
2291 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2292 return;
2293 }
2294
2295 }
2296
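/**
 * sep_hash_final - send a hash-finish transaction to the SEP
 * @data: pointer to the struct ahash_request being serviced
 * Writes the saved hash context back to the SEP in a
 * SEP_HASH_FINISH_OPCODE message; the digest itself is read back by
 * hash_final_post_op when the reply arrives.
 */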
2297 static void sep_hash_final(void *data)
2298 {
2299 u32 msg_offset;
2300 struct ahash_request *req;
2301 struct crypto_ahash *tfm;
2302 struct this_task_ctx *ta_ctx;
2303 struct sep_system_ctx *sctx;
2304 int result;
2305 unsigned long end_time;
2306 int are_we_done_yet;
2307
2308 req = (struct ahash_request *)data;
2309 tfm = crypto_ahash_reqtfm(req);
2310 sctx = crypto_ahash_ctx(tfm);
2311 ta_ctx = ahash_request_ctx(req);
2312 ta_ctx->sep_used = sep_dev;
2313
2314 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2315 "sep_hash_final\n");
2316 ta_ctx->current_hash_stage = HASH_FINISH;
2317
2318 ta_ctx->are_we_done_yet = &are_we_done_yet;
2319
2320 /* opcode and mode */
2321 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
2322
2323 /* Context */
2324 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2325 sizeof(struct sep_hash_private_context));
2326
2327 sep_end_msg(ta_ctx, msg_offset);
2328 are_we_done_yet = 0;
2329 result = sep_crypto_take_sep(ta_ctx);
2330 if (result) {
2331 dev_warn(&ta_ctx->sep_used->pdev->dev,
2332 "sep_hash_final take sep failed\n");
2333 sep_crypto_release(sctx, ta_ctx, -EINVAL);
return;
2334 }
2335
2336 /* now we sit and wait up to a fixed time for completion */
2337 end_time = jiffies + (WAIT_TIME * HZ);
2338 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2339 schedule();
2340
2341 /* Done waiting; still not done yet? */
2342 if (are_we_done_yet == 0) {
2343 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2344 "hash final job never got done\n");
2345 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2346 return;
2347 }
2348
2349 }
2350
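/**
 * sep_hash_digest - send a one-shot digest transaction to the SEP
 * @data: pointer to the struct ahash_request being serviced
 * Like sep_hash_update, but uses SEP_HASH_SINGLE_OPCODE so the SEP
 * hashes the whole request in a single transaction; only a trailing
 * partial block is passed inline in the message.
 */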
2351 static void sep_hash_digest(void *data)
2352 {
2353 int int_error;
2354 u32 msg_offset;
2355 u32 block_size;
2356 u32 msg[10];
2357 size_t copy_result;
2358 int result;
2359 int are_we_done_yet;
2360 u32 tail_len;
2361 static char small_buf[100];
2362 struct scatterlist *new_sg;
2363 void *src_ptr;
2364
2365 struct ahash_request *req;
2366 struct crypto_ahash *tfm;
2367 struct this_task_ctx *ta_ctx;
2368 struct sep_system_ctx *sctx;
2369 unsigned long end_time;
2370
2371 req = (struct ahash_request *)data;
2372 tfm = crypto_ahash_reqtfm(req);
2373 sctx = crypto_ahash_ctx(tfm);
2374 ta_ctx = ahash_request_ctx(req);
2375 ta_ctx->sep_used = sep_dev;
2376
2377 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2378 "sep_hash_digest\n");
2379 ta_ctx->current_hash_stage = HASH_DIGEST;
2380
2381 ta_ctx->are_we_done_yet = &are_we_done_yet;
2382
2383 /* length for queue status */
2384 ta_ctx->nbytes = req->nbytes;
2385
2386 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2387 tail_len = req->nbytes % block_size;
2388 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
2389 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2390 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2391
2392 /* Make sure the data is a whole number of blocks */
2393 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2394 req->nbytes,
2395 block_size, &new_sg, 1);
2396
2397 if (int_error < 0) {
2398 dev_warn(&ta_ctx->sep_used->pdev->dev,
2399 "oddball pages error in hash digest\n");
2400 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2401 return;
2402 } else if (int_error == 1) {
2403 ta_ctx->src_sg = new_sg;
2404 ta_ctx->src_sg_hold = new_sg;
2405 } else {
2406 ta_ctx->src_sg = req->src;
2407 ta_ctx->src_sg_hold = NULL;
2408 }
2409
2410 /* Null data is legal; don't dereference a missing scatterlist */
2411 if ((!req->nbytes) || (!ta_ctx->src_sg))
2412 src_ptr = NULL;
2413 else
2414 src_ptr = sg_virt(ta_ctx->src_sg);
2416
2417 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2418 ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
2419 ta_ctx->dcb_input_data.app_out_address = NULL;
2420 ta_ctx->dcb_input_data.block_size = block_size;
2421 ta_ctx->dcb_input_data.tail_block_size = 0;
2422 ta_ctx->dcb_input_data.is_applet = 0;
2423 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2424 ta_ctx->dcb_input_data.dst_sg = NULL;
2425
2426 int_error = sep_create_dcb_dmatables_context_kernel(
2427 ta_ctx->sep_used,
2428 &ta_ctx->dcb_region,
2429 &ta_ctx->dmatables_region,
2430 &ta_ctx->dma_ctx,
2431 &ta_ctx->dcb_input_data,
2432 1);
2433 if (int_error) {
2434 dev_warn(&ta_ctx->sep_used->pdev->dev,
2435 "hash digest dma table create failed\n");
2436 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2437 return;
2438 }
2439
2440 /* Construct message to SEP */
2441 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
2442 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2443 sizeof(u32), sizeof(u32), &msg_offset, 0);
2444
2445 msg[0] = (u32)0;
2446 msg[1] = (u32)0;
2447 msg[2] = (u32)0;
2448
2449 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2450 &msg_offset, 0);
2451
2452 /* Tail */
2453 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2454 sizeof(u32), &msg_offset, 0);
2455
2456 if (tail_len) {
2457 copy_result = sep_copy_offset_sg(
2458 ta_ctx->sep_used,
2459 ta_ctx->src_sg,
2460 req->nbytes - tail_len,
2461 small_buf, tail_len);
2462
2463 if (copy_result != tail_len) {
2464 dev_warn(&ta_ctx->sep_used->pdev->dev,
2465 "sg tail copy failure in hash block\n");
2466 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2467 return;
2468 }
2469
2470 sep_write_msg(ta_ctx, small_buf, tail_len,
2471 sizeof(u32) * 32, &msg_offset, 1);
2472 } else {
2473 msg_offset += sizeof(u32) * 32;
2474 }
2475
2476 sep_end_msg(ta_ctx, msg_offset);
2477
2478 are_we_done_yet = 0;
2479 result = sep_crypto_take_sep(ta_ctx);
2480 if (result) {
2481 dev_warn(&ta_ctx->sep_used->pdev->dev,
2482 "sep_hash_digest take sep failed\n");
2483 sep_crypto_release(sctx, ta_ctx, -EINVAL);
return;
2484 }
2485
2486 /* now we sit and wait up to a fixed time for completion */
2487 end_time = jiffies + (WAIT_TIME * HZ);
2488 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2489 schedule();
2490
2491 /* Done waiting; still not done yet? */
2492 if (are_we_done_yet == 0) {
2493 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2494 "hash digest job never got done\n");
2495 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2496 return;
2497 }
2498
2499 }
2500
2501 /**
2502 * sep_dequeuer
2503 * This is what is called by each of the APIs provided in the kernel
2504 * crypto descriptors. It runs in process context via the kernel
2505 * workqueues, so it is allowed to sleep.
2506 */
2507 static void sep_dequeuer(void *data)
2508 {
2509 struct crypto_queue *this_queue;
2510 struct crypto_async_request *async_req;
2511 struct crypto_async_request *backlog;
2512 struct ablkcipher_request *cypher_req;
2513 struct ahash_request *hash_req;
2514 struct sep_system_ctx *sctx;
2515 struct crypto_ahash *hash_tfm;
2516 struct this_task_ctx *ta_ctx;
2517
2518
2519 this_queue = (struct crypto_queue *)data;
2520
2521 spin_lock_irq(&queue_lock);
2522 backlog = crypto_get_backlog(this_queue);
2523 async_req = crypto_dequeue_request(this_queue);
2524 spin_unlock_irq(&queue_lock);
2525
2526 if (!async_req) {
2527 pr_debug("sep crypto queue is empty\n");
2528 return;
2529 }
2530
2531 if (backlog) {
2532 pr_debug("sep crypto backlog set\n");
2533 if (backlog->complete)
2534 backlog->complete(backlog, -EINPROGRESS);
2535 backlog = NULL;
2536 }
2537
2538 if (!async_req->tfm) {
2539 pr_debug("sep crypto queue null tfm\n");
2540 return;
2541 }
2542
2543 if (!async_req->tfm->__crt_alg) {
2544 pr_debug("sep crypto queue null __crt_alg\n");
2545 return;
2546 }
2547
2548 if (!async_req->tfm->__crt_alg->cra_type) {
2549 pr_debug("sep crypto queue null cra_type\n");
2550 return;
2551 }
2552
2553 /* we have stuff in the queue */
2554 if (async_req->tfm->__crt_alg->cra_type !=
2555 &crypto_ahash_type) {
2556 /* This is for a cypher */
2557 pr_debug("sep crypto queue doing cipher\n");
2558 cypher_req = container_of(async_req,
2559 struct ablkcipher_request,
2560 base);
2561 if (!cypher_req) {
2562 pr_debug("sep crypto queue null cypher_req\n");
2563 return;
2564 }
2565
2566 sep_crypto_block((void *)cypher_req);
2567 return;
2568 } else {
2569 /* This is a hash */
2570 pr_debug("sep crypto queue doing hash\n");
2571 /*
2572 * This is a bit more complex than a cipher; we need to
2573 * figure out which hash operation is being requested.
2574 */
2575 hash_req = ahash_request_cast(async_req);
2576 if (!hash_req) {
2577 pr_debug("sep crypto queue null hash_req\n");
2578 return;
2579 }
2580
2581 hash_tfm = crypto_ahash_reqtfm(hash_req);
2582 if (!hash_tfm) {
2583 pr_debug("sep crypto queue null hash_tfm\n");
2584 return;
2585 }
2586
2587
2588 sctx = crypto_ahash_ctx(hash_tfm);
2589 if (!sctx) {
2590 pr_debug("sep crypto queue null sctx\n");
2591 return;
2592 }
2593
2594 ta_ctx = ahash_request_ctx(hash_req);
2595
2596 if (ta_ctx->current_hash_stage == HASH_INIT) {
2597 pr_debug("sep crypto queue hash init\n");
2598 sep_hash_init((void *)hash_req);
2599 return;
2600 } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
2601 pr_debug("sep crypto queue hash update\n");
2602 sep_hash_update((void *)hash_req);
2603 return;
2604 } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
2605 pr_debug("sep crypto queue hash final\n");
2606 sep_hash_final((void *)hash_req);
2607 return;
2608 } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
2609 pr_debug("sep crypto queue hash digest\n");
2610 sep_hash_digest((void *)hash_req);
2611 return;
2612 } else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
2613 pr_debug("sep crypto queue hash finup data\n");
2614 sep_hash_update((void *)hash_req);
2615 return;
2616 } else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
2617 pr_debug("sep crypto queue hash finup finish\n");
2618 sep_hash_final((void *)hash_req);
2619 return;
2620 } else {
2621 pr_debug("sep crypto queue hash oops nothing\n");
2622 return;
2623 }
2624 }
2625 }
2626
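/*
 * The entry points below (sha1/md5/sha224/sha256 init, update, final,
 * digest and finup, plus the AES/DES encrypt and decrypt functions
 * further down) all follow the same pattern: record what is being
 * asked for in the per-request this_task_ctx, enqueue the request on
 * sep_queue under queue_lock, and kick sep_dequeuer on the driver
 * workqueue. The value returned is whatever crypto_enqueue_request()
 * reported.
 */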
2627 static int sep_sha1_init(struct ahash_request *req)
2628 {
2629 int error;
2630 int error1;
2631 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2632
2633 pr_debug("sep - doing sha1 init\n");
2634
2635 /* Clear out task context */
2636 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2637
2638 ta_ctx->sep_used = sep_dev;
2639 ta_ctx->current_request = SHA1;
2640 ta_ctx->current_hash_req = req;
2641 ta_ctx->current_cypher_req = NULL;
2642 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2643 ta_ctx->current_hash_stage = HASH_INIT;
2644
2645 /* lock necessary so that only one entity touches the queues */
2646 spin_lock_irq(&queue_lock);
2647 error = crypto_enqueue_request(&sep_queue, &req->base);
2648
2649 if ((error != 0) && (error != -EINPROGRESS))
2650 pr_debug(" sep - crypto enqueue failed: %x\n",
2651 error);
2652 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2653 sep_dequeuer, (void *)&sep_queue);
2654 if (error1)
2655 pr_debug(" sep - workqueue submit failed: %x\n",
2656 error1);
2657 spin_unlock_irq(&queue_lock);
2658 /* We return result of crypto enqueue */
2659 return error;
2660 }
2661
2662 static int sep_sha1_update(struct ahash_request *req)
2663 {
2664 int error;
2665 int error1;
2666 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2667
2668 pr_debug("sep - doing sha1 update\n");
2669
2670 ta_ctx->sep_used = sep_dev;
2671 ta_ctx->current_request = SHA1;
2672 ta_ctx->current_hash_req = req;
2673 ta_ctx->current_cypher_req = NULL;
2674 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2675 ta_ctx->current_hash_stage = HASH_UPDATE;
2676
2677 /* lock necessary so that only one entity touches the queues */
2678 spin_lock_irq(&queue_lock);
2679 error = crypto_enqueue_request(&sep_queue, &req->base);
2680
2681 if ((error != 0) && (error != -EINPROGRESS))
2682 pr_debug(" sep - crypto enqueue failed: %x\n",
2683 error);
2684 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2685 sep_dequeuer, (void *)&sep_queue);
2686 if (error1)
2687 pr_debug(" sep - workqueue submit failed: %x\n",
2688 error1);
2689 spin_unlock_irq(&queue_lock);
2690 /* We return result of crypto enqueue */
2691 return error;
2692 }
2693
2694 static int sep_sha1_final(struct ahash_request *req)
2695 {
2696 int error;
2697 int error1;
2698 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2699 pr_debug("sep - doing sha1 final\n");
2700
2701 ta_ctx->sep_used = sep_dev;
2702 ta_ctx->current_request = SHA1;
2703 ta_ctx->current_hash_req = req;
2704 ta_ctx->current_cypher_req = NULL;
2705 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2706 ta_ctx->current_hash_stage = HASH_FINISH;
2707
2708 /* lock necessary so that only one entity touches the queues */
2709 spin_lock_irq(&queue_lock);
2710 error = crypto_enqueue_request(&sep_queue, &req->base);
2711
2712 if ((error != 0) && (error != -EINPROGRESS))
2713 pr_debug(" sep - crypto enqueue failed: %x\n",
2714 error);
2715 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2716 sep_dequeuer, (void *)&sep_queue);
2717 if (error1)
2718 pr_debug(" sep - workqueue submit failed: %x\n",
2719 error1);
2720 spin_unlock_irq(&queue_lock);
2721 /* We return result of crypto enqueue */
2722 return error;
2723 }
2724
2725 static int sep_sha1_digest(struct ahash_request *req)
2726 {
2727 int error;
2728 int error1;
2729 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2730 pr_debug("sep - doing sha1 digest\n");
2731
2732 /* Clear out task context */
2733 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2734
2735 ta_ctx->sep_used = sep_dev;
2736 ta_ctx->current_request = SHA1;
2737 ta_ctx->current_hash_req = req;
2738 ta_ctx->current_cypher_req = NULL;
2739 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2740 ta_ctx->current_hash_stage = HASH_DIGEST;
2741
2742 /* lock necessary so that only one entity touches the queues */
2743 spin_lock_irq(&queue_lock);
2744 error = crypto_enqueue_request(&sep_queue, &req->base);
2745
2746 if ((error != 0) && (error != -EINPROGRESS))
2747 pr_debug(" sep - crypto enqueue failed: %x\n",
2748 error);
2749 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2750 sep_dequeuer, (void *)&sep_queue);
2751 if (error1)
2752 pr_debug(" sep - workqueue submit failed: %x\n",
2753 error1);
2754 spin_unlock_irq(&queue_lock);
2755 /* We return result of crypto enqueue */
2756 return error;
2757 }
2758
2759 static int sep_sha1_finup(struct ahash_request *req)
2760 {
2761 int error;
2762 int error1;
2763 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2764 pr_debug("sep - doing sha1 finup\n");
2765
2766 ta_ctx->sep_used = sep_dev;
2767 ta_ctx->current_request = SHA1;
2768 ta_ctx->current_hash_req = req;
2769 ta_ctx->current_cypher_req = NULL;
2770 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2771 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2772
2773 /* lock necessary so that only one entity touches the queues */
2774 spin_lock_irq(&queue_lock);
2775 error = crypto_enqueue_request(&sep_queue, &req->base);
2776
2777 if ((error != 0) && (error != -EINPROGRESS))
2778 pr_debug(" sep - crypto enqueue failed: %x\n",
2779 error);
2780 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2781 sep_dequeuer, (void *)&sep_queue);
2782 if (error1)
2783 pr_debug(" sep - workqueue submit failed: %x\n",
2784 error1);
2785 spin_unlock_irq(&queue_lock);
2786 /* We return result of crypto enqueue */
2787 return error;
2788 }
2789
2790 static int sep_md5_init(struct ahash_request *req)
2791 {
2792 int error;
2793 int error1;
2794 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2795 pr_debug("sep - doing md5 init\n");
2796
2797 /* Clear out task context */
2798 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2799
2800 ta_ctx->sep_used = sep_dev;
2801 ta_ctx->current_request = MD5;
2802 ta_ctx->current_hash_req = req;
2803 ta_ctx->current_cypher_req = NULL;
2804 ta_ctx->hash_opmode = SEP_HASH_MD5;
2805 ta_ctx->current_hash_stage = HASH_INIT;
2806
2807 /* lock necessary so that only one entity touches the queues */
2808 spin_lock_irq(&queue_lock);
2809 error = crypto_enqueue_request(&sep_queue, &req->base);
2810
2811 if ((error != 0) && (error != -EINPROGRESS))
2812 pr_debug(" sep - crypto enqueue failed: %x\n",
2813 error);
2814 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2815 sep_dequeuer, (void *)&sep_queue);
2816 if (error1)
2817 pr_debug(" sep - workqueue submit failed: %x\n",
2818 error1);
2819 spin_unlock_irq(&queue_lock);
2820 /* We return result of crypto enqueue */
2821 return error;
2822 }
2823
2824 static int sep_md5_update(struct ahash_request *req)
2825 {
2826 int error;
2827 int error1;
2828 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2829 pr_debug("sep - doing md5 update\n");
2830
2831 ta_ctx->sep_used = sep_dev;
2832 ta_ctx->current_request = MD5;
2833 ta_ctx->current_hash_req = req;
2834 ta_ctx->current_cypher_req = NULL;
2835 ta_ctx->hash_opmode = SEP_HASH_MD5;
2836 ta_ctx->current_hash_stage = HASH_UPDATE;
2837
2838 /* lock necessary so that only one entity touches the queues */
2839 spin_lock_irq(&queue_lock);
2840 error = crypto_enqueue_request(&sep_queue, &req->base);
2841
2842 if ((error != 0) && (error != -EINPROGRESS))
2843 pr_debug(" sep - crypto enqueue failed: %x\n",
2844 error);
2845 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2846 sep_dequeuer, (void *)&sep_queue);
2847 if (error1)
2848 pr_debug(" sep - workqueue submit failed: %x\n",
2849 error1);
2850 spin_unlock_irq(&queue_lock);
2851 /* We return result of crypto enqueue */
2852 return error;
2853 }
2854
2855 static int sep_md5_final(struct ahash_request *req)
2856 {
2857 int error;
2858 int error1;
2859 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2860 pr_debug("sep - doing md5 final\n");
2861
2862 ta_ctx->sep_used = sep_dev;
2863 ta_ctx->current_request = MD5;
2864 ta_ctx->current_hash_req = req;
2865 ta_ctx->current_cypher_req = NULL;
2866 ta_ctx->hash_opmode = SEP_HASH_MD5;
2867 ta_ctx->current_hash_stage = HASH_FINISH;
2868
2869 /* lock necessary so that only one entity touches the queues */
2870 spin_lock_irq(&queue_lock);
2871 error = crypto_enqueue_request(&sep_queue, &req->base);
2872
2873 if ((error != 0) && (error != -EINPROGRESS))
2874 pr_debug(" sep - crypto enqueue failed: %x\n",
2875 error);
2876 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2877 sep_dequeuer, (void *)&sep_queue);
2878 if (error1)
2879 pr_debug(" sep - workqueue submit failed: %x\n",
2880 error1);
2881 spin_unlock_irq(&queue_lock);
2882 /* We return result of crypto enqueue */
2883 return error;
2884 }
2885
2886 static int sep_md5_digest(struct ahash_request *req)
2887 {
2888 int error;
2889 int error1;
2890 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2891
2892 pr_debug("sep - doing md5 digest\n");
2893
2894 /* Clear out task context */
2895 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2896
2897 ta_ctx->sep_used = sep_dev;
2898 ta_ctx->current_request = MD5;
2899 ta_ctx->current_hash_req = req;
2900 ta_ctx->current_cypher_req = NULL;
2901 ta_ctx->hash_opmode = SEP_HASH_MD5;
2902 ta_ctx->current_hash_stage = HASH_DIGEST;
2903
2904 /* lock necessary so that only one entity touches the queues */
2905 spin_lock_irq(&queue_lock);
2906 error = crypto_enqueue_request(&sep_queue, &req->base);
2907
2908 if ((error != 0) && (error != -EINPROGRESS))
2909 pr_debug(" sep - crypto enqueue failed: %x\n",
2910 error);
2911 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2912 sep_dequeuer, (void *)&sep_queue);
2913 if (error1)
2914 pr_debug(" sep - workqueue submit failed: %x\n",
2915 error1);
2916 spin_unlock_irq(&queue_lock);
2917 /* We return result of crypto enqueue */
2918 return error;
2919 }
2920
2921 static int sep_md5_finup(struct ahash_request *req)
2922 {
2923 int error;
2924 int error1;
2925 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2926
2927 pr_debug("sep - doing md5 finup\n");
2928
2929 ta_ctx->sep_used = sep_dev;
2930 ta_ctx->current_request = MD5;
2931 ta_ctx->current_hash_req = req;
2932 ta_ctx->current_cypher_req = NULL;
2933 ta_ctx->hash_opmode = SEP_HASH_MD5;
2934 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2935
2936 /* lock necessary so that only one entity touches the queues */
2937 spin_lock_irq(&queue_lock);
2938 error = crypto_enqueue_request(&sep_queue, &req->base);
2939
2940 if ((error != 0) && (error != -EINPROGRESS))
2941 pr_debug(" sep - crypto enqueue failed: %x\n",
2942 error);
2943 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2944 sep_dequeuer, (void *)&sep_queue);
2945 if (error1)
2946 pr_debug(" sep - workqueue submit failed: %x\n",
2947 error1);
2948 spin_unlock_irq(&queue_lock);
2949 /* We return result of crypto enqueue */
2950 return error;
2951 }
2952
2953 static int sep_sha224_init(struct ahash_request *req)
2954 {
2955 int error;
2956 int error1;
2957 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2958 pr_debug("sep - doing sha224 init\n");
2959
2960 /* Clear out task context */
2961 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2962
2963 ta_ctx->sep_used = sep_dev;
2964 ta_ctx->current_request = SHA224;
2965 ta_ctx->current_hash_req = req;
2966 ta_ctx->current_cypher_req = NULL;
2967 ta_ctx->hash_opmode = SEP_HASH_SHA224;
2968 ta_ctx->current_hash_stage = HASH_INIT;
2969
2970 /* lock necessary so that only one entity touches the queues */
2971 spin_lock_irq(&queue_lock);
2972 error = crypto_enqueue_request(&sep_queue, &req->base);
2973
2974 if ((error != 0) && (error != -EINPROGRESS))
2975 pr_debug(" sep - crypto enqueue failed: %x\n",
2976 error);
2977 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2978 sep_dequeuer, (void *)&sep_queue);
2979 if (error1)
2980 pr_debug(" sep - workqueue submit failed: %x\n",
2981 error1);
2982 spin_unlock_irq(&queue_lock);
2983 /* We return result of crypto enqueue */
2984 return error;
2985 }
2986
2987 static int sep_sha224_update(struct ahash_request *req)
2988 {
2989 int error;
2990 int error1;
2991 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2992 pr_debug("sep - doing sha224 update\n");
2993
2994 ta_ctx->sep_used = sep_dev;
2995 ta_ctx->current_request = SHA224;
2996 ta_ctx->current_hash_req = req;
2997 ta_ctx->current_cypher_req = NULL;
2998 ta_ctx->hash_opmode = SEP_HASH_SHA224;
2999 ta_ctx->current_hash_stage = HASH_UPDATE;
3000
3001 /* lock necessary so that only one entity touches the queues */
3002 spin_lock_irq(&queue_lock);
3003 error = crypto_enqueue_request(&sep_queue, &req->base);
3004
3005 if ((error != 0) && (error != -EINPROGRESS))
3006 pr_debug(" sep - crypto enqueue failed: %x\n",
3007 error);
3008 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3009 sep_dequeuer, (void *)&sep_queue);
3010 if (error1)
3011 pr_debug(" sep - workqueue submit failed: %x\n",
3012 error1);
3013 spin_unlock_irq(&queue_lock);
3014 /* We return result of crypto enqueue */
3015 return error;
3016 }
3017
3018 static int sep_sha224_final(struct ahash_request *req)
3019 {
3020 int error;
3021 int error1;
3022 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3023 pr_debug("sep - doing sha224 final\n");
3024
3025 ta_ctx->sep_used = sep_dev;
3026 ta_ctx->current_request = SHA224;
3027 ta_ctx->current_hash_req = req;
3028 ta_ctx->current_cypher_req = NULL;
3029 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3030 ta_ctx->current_hash_stage = HASH_FINISH;
3031
3032 /* lock necessary so that only one entity touches the queues */
3033 spin_lock_irq(&queue_lock);
3034 error = crypto_enqueue_request(&sep_queue, &req->base);
3035
3036 if ((error != 0) && (error != -EINPROGRESS))
3037 pr_debug(" sep - crypto enqueue failed: %x\n",
3038 error);
3039 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3040 sep_dequeuer, (void *)&sep_queue);
3041 if (error1)
3042 pr_debug(" sep - workqueue submit failed: %x\n",
3043 error1);
3044 spin_unlock_irq(&queue_lock);
3045 /* We return result of crypto enqueue */
3046 return error;
3047 }
3048
3049 static int sep_sha224_digest(struct ahash_request *req)
3050 {
3051 int error;
3052 int error1;
3053 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3054
3055 pr_debug("sep - doing sha224 digest\n");
3056
3057 /* Clear out task context */
3058 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3059
3060 ta_ctx->sep_used = sep_dev;
3061 ta_ctx->current_request = SHA224;
3062 ta_ctx->current_hash_req = req;
3063 ta_ctx->current_cypher_req = NULL;
3064 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3065 ta_ctx->current_hash_stage = HASH_DIGEST;
3066
3067 /* lock necessary so that only one entity touches the queues */
3068 spin_lock_irq(&queue_lock);
3069 error = crypto_enqueue_request(&sep_queue, &req->base);
3070
3071 if ((error != 0) && (error != -EINPROGRESS))
3072 pr_debug(" sep - crypto enqueue failed: %x\n",
3073 error);
3074 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3075 sep_dequeuer, (void *)&sep_queue);
3076 if (error1)
3077 pr_debug(" sep - workqueue submit failed: %x\n",
3078 error1);
3079 spin_unlock_irq(&queue_lock);
3080 /* We return result of crypto enqueue */
3081 return error;
3082 }
3083
3084 static int sep_sha224_finup(struct ahash_request *req)
3085 {
3086 int error;
3087 int error1;
3088 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3089
3090 pr_debug("sep - doing sha224 finup\n");
3091
3092 ta_ctx->sep_used = sep_dev;
3093 ta_ctx->current_request = SHA224;
3094 ta_ctx->current_hash_req = req;
3095 ta_ctx->current_cypher_req = NULL;
3096 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3097 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3098
3099 /* lock necessary so that only one entity touches the queues */
3100 spin_lock_irq(&queue_lock);
3101 error = crypto_enqueue_request(&sep_queue, &req->base);
3102
3103 if ((error != 0) && (error != -EINPROGRESS))
3104 pr_debug(" sep - crypto enqueue failed: %x\n",
3105 error);
3106 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3107 sep_dequeuer, (void *)&sep_queue);
3108 if (error1)
3109 pr_debug(" sep - workqueue submit failed: %x\n",
3110 error1);
3111 spin_unlock_irq(&queue_lock);
3112 /* We return result of crypto enqueue */
3113 return error;
3114 }
3115
3116 static int sep_sha256_init(struct ahash_request *req)
3117 {
3118 int error;
3119 int error1;
3120 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3121 pr_debug("sep - doing sha256 init\n");
3122
3123 /* Clear out task context */
3124 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3125
3126 ta_ctx->sep_used = sep_dev;
3127 ta_ctx->current_request = SHA256;
3128 ta_ctx->current_hash_req = req;
3129 ta_ctx->current_cypher_req = NULL;
3130 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3131 ta_ctx->current_hash_stage = HASH_INIT;
3132
3133 /* lock necessary so that only one entity touches the queues */
3134 spin_lock_irq(&queue_lock);
3135 error = crypto_enqueue_request(&sep_queue, &req->base);
3136
3137 if ((error != 0) && (error != -EINPROGRESS))
3138 pr_debug(" sep - crypto enqueue failed: %x\n",
3139 error);
3140 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3141 sep_dequeuer, (void *)&sep_queue);
3142 if (error1)
3143 pr_debug(" sep - workqueue submit failed: %x\n",
3144 error1);
3145 spin_unlock_irq(&queue_lock);
3146 /* We return result of crypto enqueue */
3147 return error;
3148 }
3149
3150 static int sep_sha256_update(struct ahash_request *req)
3151 {
3152 int error;
3153 int error1;
3154 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3155 pr_debug("sep - doing sha256 update\n");
3156
3157 ta_ctx->sep_used = sep_dev;
3158 ta_ctx->current_request = SHA256;
3159 ta_ctx->current_hash_req = req;
3160 ta_ctx->current_cypher_req = NULL;
3161 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3162 ta_ctx->current_hash_stage = HASH_UPDATE;
3163
3164 /* lock necessary so that only one entity touches the queues */
3165 spin_lock_irq(&queue_lock);
3166 error = crypto_enqueue_request(&sep_queue, &req->base);
3167
3168 if ((error != 0) && (error != -EINPROGRESS))
3169 pr_debug(" sep - crypto enqueue failed: %x\n",
3170 error);
3171 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3172 sep_dequeuer, (void *)&sep_queue);
3173 if (error1)
3174 pr_debug(" sep - workqueue submit failed: %x\n",
3175 error1);
3176 spin_unlock_irq(&queue_lock);
3177 /* We return result of crypto enqueue */
3178 return error;
3179 }
3180
3181 static int sep_sha256_final(struct ahash_request *req)
3182 {
3183 int error;
3184 int error1;
3185 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3186 pr_debug("sep - doing sha256 final\n");
3187
3188 ta_ctx->sep_used = sep_dev;
3189 ta_ctx->current_request = SHA256;
3190 ta_ctx->current_hash_req = req;
3191 ta_ctx->current_cypher_req = NULL;
3192 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3193 ta_ctx->current_hash_stage = HASH_FINISH;
3194
3195 /* lock necessary so that only one entity touches the queues */
3196 spin_lock_irq(&queue_lock);
3197 error = crypto_enqueue_request(&sep_queue, &req->base);
3198
3199 if ((error != 0) && (error != -EINPROGRESS))
3200 pr_debug(" sep - crypto enqueue failed: %x\n",
3201 error);
3202 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3203 sep_dequeuer, (void *)&sep_queue);
3204 if (error1)
3205 pr_debug(" sep - workqueue submit failed: %x\n",
3206 error1);
3207 spin_unlock_irq(&queue_lock);
3208 /* We return result of crypto enqueue */
3209 return error;
3210 }
3211
3212 static int sep_sha256_digest(struct ahash_request *req)
3213 {
3214 int error;
3215 int error1;
3216 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3217
3218 pr_debug("sep - doing sha256 digest\n");
3219
3220 /* Clear out task context */
3221 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3222
3223 ta_ctx->sep_used = sep_dev;
3224 ta_ctx->current_request = SHA256;
3225 ta_ctx->current_hash_req = req;
3226 ta_ctx->current_cypher_req = NULL;
3227 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3228 ta_ctx->current_hash_stage = HASH_DIGEST;
3229
3230 /* lock necessary so that only one entity touches the queues */
3231 spin_lock_irq(&queue_lock);
3232 error = crypto_enqueue_request(&sep_queue, &req->base);
3233
3234 if ((error != 0) && (error != -EINPROGRESS))
3235 pr_debug(" sep - crypto enqueue failed: %x\n",
3236 error);
3237 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3238 sep_dequeuer, (void *)&sep_queue);
3239 if (error1)
3240 pr_debug(" sep - workqueue submit failed: %x\n",
3241 error1);
3242 spin_unlock_irq(&queue_lock);
3243 /* We return result of crypto enqueue */
3244 return error;
3245 }
3246
3247 static int sep_sha256_finup(struct ahash_request *req)
3248 {
3249 int error;
3250 int error1;
3251 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3252
3253 pr_debug("sep - doing sha256 finup\n");
3254
3255 ta_ctx->sep_used = sep_dev;
3256 ta_ctx->current_request = SHA256;
3257 ta_ctx->current_hash_req = req;
3258 ta_ctx->current_cypher_req = NULL;
3259 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3260 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3261
3262 /* lock necessary so that only one entity touches the queues */
3263 spin_lock_irq(&queue_lock);
3264 error = crypto_enqueue_request(&sep_queue, &req->base);
3265
3266 if ((error != 0) && (error != -EINPROGRESS))
3267 pr_debug(" sep - crypto enqueue failed: %x\n",
3268 error);
3269 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3270 sep_dequeuer, (void *)&sep_queue);
3271 if (error1)
3272 pr_debug(" sep - workqueue submit failed: %x\n",
3273 error1);
3274 spin_unlock_irq(&queue_lock);
3275 /* We return result of crypto enqueue */
3276 return error;
3277 }
3278
3279 static int sep_crypto_init(struct crypto_tfm *tfm)
3280 {
3281 const char *alg_name = crypto_tfm_alg_name(tfm);
3282
3283 if (alg_name == NULL)
3284 pr_debug("sep_crypto_init alg is NULL\n");
3285 else
3286 pr_debug("sep_crypto_init alg is %s\n", alg_name);
3287
3288 tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
3289 return 0;
3290 }
3291
3292 static void sep_crypto_exit(struct crypto_tfm *tfm)
3293 {
3294 pr_debug("sep_crypto_exit\n");
3295 }
3296
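/**
 * sep_aes_setkey - remember an AES key for later transfer to the SEP
 * @tfm: the ablkcipher transform the key belongs to
 * @key: key material
 * @keylen: length of @key in bytes
 * The key is only cached in the system context here; key_sent is
 * cleared so the next encrypt/decrypt operation sends it to the SEP.
 */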
3297 static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3298 unsigned int keylen)
3299 {
3300 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3301
3302 pr_debug("sep aes setkey\n");
3303
3304 pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
3305 switch (keylen) {
3306 case SEP_AES_KEY_128_SIZE:
3307 sctx->aes_key_size = AES_128;
3308 break;
3309 case SEP_AES_KEY_192_SIZE:
3310 sctx->aes_key_size = AES_192;
3311 break;
3312 case SEP_AES_KEY_256_SIZE:
3313 sctx->aes_key_size = AES_256;
3314 break;
3315 case SEP_AES_KEY_512_SIZE:
3316 sctx->aes_key_size = AES_512;
3317 break;
3318 default:
3319 pr_debug("invalid sep aes key size %x\n",
3320 keylen);
3321 return -EINVAL;
3322 }
3323
3324 memset(&sctx->key.aes, 0, sizeof(u32) *
3325 SEP_AES_MAX_KEY_SIZE_WORDS);
3326 memcpy(&sctx->key.aes, key, keylen);
3327 sctx->keylen = keylen;
3328 /* Indicate to encrypt/decrypt function to send key to SEP */
3329 sctx->key_sent = 0;
3330
3331 return 0;
3332 }
3333
3334 static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
3335 {
3336 int error;
3337 int error1;
3338 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3339
3340 pr_debug("sep - doing aes ecb encrypt\n");
3341
3342 /* Clear out task context */
3343 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3344
3345 ta_ctx->sep_used = sep_dev;
3346 ta_ctx->current_request = AES_ECB;
3347 ta_ctx->current_hash_req = NULL;
3348 ta_ctx->current_cypher_req = req;
3349 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3350 ta_ctx->aes_opmode = SEP_AES_ECB;
3351 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3352 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3353
3354 /* lock necessary so that only one entity touches the queues */
3355 spin_lock_irq(&queue_lock);
3356 error = crypto_enqueue_request(&sep_queue, &req->base);
3357
3358 if ((error != 0) && (error != -EINPROGRESS))
3359 pr_debug(" sep - crypto enqueue failed: %x\n",
3360 error);
3361 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3362 sep_dequeuer, (void *)&sep_queue);
3363 if (error1)
3364 pr_debug(" sep - workqueue submit failed: %x\n",
3365 error1);
3366 spin_unlock_irq(&queue_lock);
3367 /* We return result of crypto enqueue */
3368 return error;
3369 }
3370
3371 static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
3372 {
3373 int error;
3374 int error1;
3375 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3376
3377 pr_debug("sep - doing aes ecb decrypt\n");
3378
3379 /* Clear out task context */
3380 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3381
3382 ta_ctx->sep_used = sep_dev;
3383 ta_ctx->current_request = AES_ECB;
3384 ta_ctx->current_hash_req = NULL;
3385 ta_ctx->current_cypher_req = req;
3386 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3387 ta_ctx->aes_opmode = SEP_AES_ECB;
3388 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3389 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3390
3391 /* lock necessary so that only one entity touches the queues */
3392 spin_lock_irq(&queue_lock);
3393 error = crypto_enqueue_request(&sep_queue, &req->base);
3394
3395 if ((error != 0) && (error != -EINPROGRESS))
3396 pr_debug(" sep - crypto enqueue failed: %x\n",
3397 error);
3398 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3399 sep_dequeuer, (void *)&sep_queue);
3400 if (error1)
3401 pr_debug(" sep - workqueue submit failed: %x\n",
3402 error1);
3403 spin_unlock_irq(&queue_lock);
3404 /* We return result of crypto enqueue */
3405 return error;
3406 }
3407
3408 static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
3409 {
3410 int error;
3411 int error1;
3412 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3413 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3414 crypto_ablkcipher_reqtfm(req));
3415
3416 pr_debug("sep - doing aes cbc encrypt\n");
3417
3418 /* Clear out task context */
3419 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3420
3421 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3422 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3423
3424 ta_ctx->sep_used = sep_dev;
3425 ta_ctx->current_request = AES_CBC;
3426 ta_ctx->current_hash_req = NULL;
3427 ta_ctx->current_cypher_req = req;
3428 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3429 ta_ctx->aes_opmode = SEP_AES_CBC;
3430 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3431 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3432
3433 /* lock necessary so that only one entity touches the queues */
3434 spin_lock_irq(&queue_lock);
3435 error = crypto_enqueue_request(&sep_queue, &req->base);
3436
3437 if ((error != 0) && (error != -EINPROGRESS))
3438 pr_debug(" sep - crypto enqueue failed: %x\n",
3439 error);
3440 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3441 sep_dequeuer, (void *)&sep_queue);
3442 if (error1)
3443 pr_debug(" sep - workqueue submit failed: %x\n",
3444 error1);
3445 spin_unlock_irq(&queue_lock);
3446 /* We return result of crypto enqueue */
3447 return error;
3448 }
3449
3450 static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
3451 {
3452 int error;
3453 int error1;
3454 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3455 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3456 crypto_ablkcipher_reqtfm(req));
3457
3458 pr_debug("sep - doing aes cbc decrypt\n");
3459
3460 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3461 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3462
3463 /* Clear out task context */
3464 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3465
3466 ta_ctx->sep_used = sep_dev;
3467 ta_ctx->current_request = AES_CBC;
3468 ta_ctx->current_hash_req = NULL;
3469 ta_ctx->current_cypher_req = req;
3470 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3471 ta_ctx->aes_opmode = SEP_AES_CBC;
3472 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3473 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3474
3475 /* lock necessary so that only one entity touches the queues */
3476 spin_lock_irq(&queue_lock);
3477 error = crypto_enqueue_request(&sep_queue, &req->base);
3478
3479 if ((error != 0) && (error != -EINPROGRESS))
3480 pr_debug(" sep - crypto enqueue failed: %x\n",
3481 error);
3482 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3483 sep_dequeuer, (void *)&sep_queue);
3484 if (error1)
3485 pr_debug(" sep - workqueue submit failed: %x\n",
3486 error1);
3487 spin_unlock_irq(&queue_lock);
3488 /* We return result of crypto enqueue */
3489 return error;
3490 }
3491
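/**
 * sep_des_setkey - remember a DES/3DES key for later transfer to the SEP
 * @tfm: the ablkcipher transform the key belongs to
 * @key: key material
 * @keylen: length of @key in bytes (one, two or three DES keys)
 * Rejects weak keys when CRYPTO_TFM_REQ_WEAK_KEY is set; as with the
 * AES variant, the key is only cached here and sent with the next
 * request.
 */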
3492 static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3493 unsigned int keylen)
3494 {
3495 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3496 struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
3497 u32 *flags = &ctfm->crt_flags;
3498
3499 pr_debug("sep des setkey\n");
3500
3501 switch (keylen) {
3502 case DES_KEY_SIZE:
3503 sctx->des_nbr_keys = DES_KEY_1;
3504 break;
3505 case DES_KEY_SIZE * 2:
3506 sctx->des_nbr_keys = DES_KEY_2;
3507 break;
3508 case DES_KEY_SIZE * 3:
3509 sctx->des_nbr_keys = DES_KEY_3;
3510 break;
3511 default:
3512 pr_debug("invalid key size %x\n",
3513 keylen);
3514 return -EINVAL;
3515 }
3516
3517 if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
3518 (sep_weak_key(key, keylen))) {
3519
3520 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
3521 pr_debug("weak key\n");
3522 return -EINVAL;
3523 }
3524
3525 memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
3526 memcpy(&sctx->key.des.key1, key, keylen);
3527 sctx->keylen = keylen;
3528 /* Indicate to encrypt/decrypt function to send key to SEP */
3529 sctx->key_sent = 0;
3530
3531 return 0;
3532 }
3533
3534 static int sep_des_ebc_encrypt(struct ablkcipher_request *req)
3535 {
3536 int error;
3537 int error1;
3538 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3539
3540 pr_debug("sep - doing des ecb encrypt\n");
3541
3542 /* Clear out task context */
3543 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3544
3545 ta_ctx->sep_used = sep_dev;
3546 ta_ctx->current_request = DES_ECB;
3547 ta_ctx->current_hash_req = NULL;
3548 ta_ctx->current_cypher_req = req;
3549 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3550 ta_ctx->des_opmode = SEP_DES_ECB;
3551 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3552 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3553
3554 /* lock necessary so that only one entity touches the queues */
3555 spin_lock_irq(&queue_lock);
3556 error = crypto_enqueue_request(&sep_queue, &req->base);
3557
3558 if ((error != 0) && (error != -EINPROGRESS))
3559 pr_debug(" sep - crypto enqueue failed: %x\n",
3560 error);
3561 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3562 sep_dequeuer, (void *)&sep_queue);
3563 if (error1)
3564 pr_debug(" sep - workqueue submit failed: %x\n",
3565 error1);
3566 spin_unlock_irq(&queue_lock);
3567 /* We return result of crypto enqueue */
3568 return error;
3569 }
3570
3571 static int sep_des_ebc_decrypt(struct ablkcipher_request *req)
3572 {
3573 int error;
3574 int error1;
3575 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3576
3577 pr_debug("sep - doing des ecb decrypt\n");
3578
3579 /* Clear out task context */
3580 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3581
3582 ta_ctx->sep_used = sep_dev;
3583 ta_ctx->current_request = DES_ECB;
3584 ta_ctx->current_hash_req = NULL;
3585 ta_ctx->current_cypher_req = req;
3586 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3587 ta_ctx->des_opmode = SEP_DES_ECB;
3588 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3589 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3590
3591 /* lock necessary so that only one entity touches the queues */
3592 spin_lock_irq(&queue_lock);
3593 error = crypto_enqueue_request(&sep_queue, &req->base);
3594
3595 if ((error != 0) && (error != -EINPROGRESS))
3596 pr_debug(" sep - crypto enqueue failed: %x\n",
3597 error);
3598 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3599 sep_dequeuer, (void *)&sep_queue);
3600 if (error1)
3601 pr_debug(" sep - workqueue submit failed: %x\n",
3602 error1);
3603 spin_unlock_irq(&queue_lock);
3604 /* We return result of crypto enqueue */
3605 return error;
3606 }
3607
3608 static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
3609 {
3610 int error;
3611 int error1;
3612 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3613
3614 pr_debug("sep - doing des cbc encrypt\n");
3615
3616 /* Clear out task context */
3617 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3618
3619 ta_ctx->sep_used = sep_dev;
3620 ta_ctx->current_request = DES_CBC;
3621 ta_ctx->current_hash_req = NULL;
3622 ta_ctx->current_cypher_req = req;
3623 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3624 ta_ctx->des_opmode = SEP_DES_CBC;
3625 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3626 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3627
3628 /* lock necessary so that only one entity touches the queues */
3629 spin_lock_irq(&queue_lock);
3630 error = crypto_enqueue_request(&sep_queue, &req->base);
3631
3632 if ((error != 0) && (error != -EINPROGRESS))
3633 pr_debug(" sep - crypto enqueue failed: %x\n",
3634 error);
3635 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3636 sep_dequeuer, (void *)&sep_queue);
3637 if (error1)
3638 pr_debug(" sep - workqueue submit failed: %x\n",
3639 error1);
3640 spin_unlock_irq(&queue_lock);
3641 /* We return result of crypto enqueue */
3642 return error;
3643 }
3644
3645 static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
3646 {
3647 int error;
3648 int error1;
3649 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3650
3651 pr_debug("sep - doing des cbc decrypt\n");
3652
3653 /* Clear out task context */
3654 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3655
3656 ta_ctx->sep_used = sep_dev;
3657 ta_ctx->current_request = DES_CBC;
3658 ta_ctx->current_hash_req = NULL;
3659 ta_ctx->current_cypher_req = req;
3660 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3661 ta_ctx->des_opmode = SEP_DES_CBC;
3662 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3663 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3664
3665 /* lock necessary so that only one entity touches the queues */
3666 spin_lock_irq(&queue_lock);
3667 error = crypto_enqueue_request(&sep_queue, &req->base);
3668
3669 if ((error != 0) && (error != -EINPROGRESS))
3670 pr_debug(" sep - crypto enqueue failed: %x\n",
3671 error);
3672 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3673 sep_dequeuer, (void *)&sep_queue);
3674 if (error1)
3675 pr_debug(" sep - workqueue submit failed: %x\n",
3676 error1);
3677 spin_unlock_irq(&queue_lock);
3678 /* We return result of crypto enqueue */
3679 return error;
3680 }
3681
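/* Async hash algorithm descriptors advertised by this driver */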
3682 static struct ahash_alg hash_algs[] = {
3683 {
3684 .init = sep_sha1_init,
3685 .update = sep_sha1_update,
3686 .final = sep_sha1_final,
3687 .digest = sep_sha1_digest,
3688 .finup = sep_sha1_finup,
3689 .halg = {
3690 .digestsize = SHA1_DIGEST_SIZE,
3691 .base = {
3692 .cra_name = "sha1",
3693 .cra_driver_name = "sha1-sep",
3694 .cra_priority = 100,
3695 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3696 CRYPTO_ALG_ASYNC,
3697 .cra_blocksize = SHA1_BLOCK_SIZE,
3698 .cra_ctxsize = sizeof(struct sep_system_ctx),
3699 .cra_alignmask = 0,
3700 .cra_module = THIS_MODULE,
3701 .cra_init = sep_hash_cra_init,
3702 .cra_exit = sep_hash_cra_exit,
3703 }
3704 }
3705 },
3706 {
3707 .init = sep_md5_init,
3708 .update = sep_md5_update,
3709 .final = sep_md5_final,
3710 .digest = sep_md5_digest,
3711 .finup = sep_md5_finup,
3712 .halg = {
3713 .digestsize = MD5_DIGEST_SIZE,
3714 .base = {
3715 .cra_name = "md5",
3716 .cra_driver_name = "md5-sep",
3717 .cra_priority = 100,
3718 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3719 CRYPTO_ALG_ASYNC,
3720 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
3721 .cra_ctxsize = sizeof(struct sep_system_ctx),
3722 .cra_alignmask = 0,
3723 .cra_module = THIS_MODULE,
3724 .cra_init = sep_hash_cra_init,
3725 .cra_exit = sep_hash_cra_exit,
3726 }
3727 }
3728 },
3729 {
3730 .init = sep_sha224_init,
3731 .update = sep_sha224_update,
3732 .final = sep_sha224_final,
3733 .digest = sep_sha224_digest,
3734 .finup = sep_sha224_finup,
3735 .halg = {
3736 .digestsize = SHA224_DIGEST_SIZE,
3737 .base = {
3738 .cra_name = "sha224",
3739 .cra_driver_name = "sha224-sep",
3740 .cra_priority = 100,
3741 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3742 CRYPTO_ALG_ASYNC,
3743 .cra_blocksize = SHA224_BLOCK_SIZE,
3744 .cra_ctxsize = sizeof(struct sep_system_ctx),
3745 .cra_alignmask = 0,
3746 .cra_module = THIS_MODULE,
3747 .cra_init = sep_hash_cra_init,
3748 .cra_exit = sep_hash_cra_exit,
3749 }
3750 }
3751 },
3752 {
3753 .init = sep_sha256_init,
3754 .update = sep_sha256_update,
3755 .final = sep_sha256_final,
3756 .digest = sep_sha256_digest,
3757 .finup = sep_sha256_finup,
3758 .halg = {
3759 .digestsize = SHA256_DIGEST_SIZE,
3760 .base = {
3761 .cra_name = "sha256",
3762 .cra_driver_name = "sha256-sep",
3763 .cra_priority = 100,
3764 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3765 CRYPTO_ALG_ASYNC,
3766 .cra_blocksize = SHA256_BLOCK_SIZE,
3767 .cra_ctxsize = sizeof(struct sep_system_ctx),
3768 .cra_alignmask = 0,
3769 .cra_module = THIS_MODULE,
3770 .cra_init = sep_hash_cra_init,
3771 .cra_exit = sep_hash_cra_exit,
3772 }
3773 }
3774 }
3775 };
3776
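/*
 * Block cipher (ablkcipher) transforms: AES and DES/3DES in ECB and CBC
 * modes, all asynchronous and backed by the SEP hardware. Each entry is
 * registered with crypto_register_alg() from sep_crypto_setup() below.
 */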
3777 static struct crypto_alg crypto_algs[] = {
3778 {
3779 .cra_name = "ecb(aes)",
3780 .cra_driver_name = "ecb-aes-sep",
3781 .cra_priority = 100,
3782 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3783 .cra_blocksize = AES_BLOCK_SIZE,
3784 .cra_ctxsize = sizeof(struct sep_system_ctx),
3785 .cra_alignmask = 0,
3786 .cra_type = &crypto_ablkcipher_type,
3787 .cra_module = THIS_MODULE,
3788 .cra_init = sep_crypto_init,
3789 .cra_exit = sep_crypto_exit,
3790 .cra_u.ablkcipher = {
3791 .min_keysize = AES_MIN_KEY_SIZE,
3792 .max_keysize = AES_MAX_KEY_SIZE,
3793 .setkey = sep_aes_setkey,
3794 .encrypt = sep_aes_ecb_encrypt,
3795 .decrypt = sep_aes_ecb_decrypt,
3796 }
3797 },
3798 {
3799 .cra_name = "cbc(aes)",
3800 .cra_driver_name = "cbc-aes-sep",
3801 .cra_priority = 100,
3802 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3803 .cra_blocksize = AES_BLOCK_SIZE,
3804 .cra_ctxsize = sizeof(struct sep_system_ctx),
3805 .cra_alignmask = 0,
3806 .cra_type = &crypto_ablkcipher_type,
3807 .cra_module = THIS_MODULE,
3808 .cra_init = sep_crypto_init,
3809 .cra_exit = sep_crypto_exit,
3810 .cra_u.ablkcipher = {
3811 .min_keysize = AES_MIN_KEY_SIZE,
3812 .max_keysize = AES_MAX_KEY_SIZE,
3813 .setkey = sep_aes_setkey,
3814 .encrypt = sep_aes_cbc_encrypt,
3815 .ivsize = AES_BLOCK_SIZE,
3816 .decrypt = sep_aes_cbc_decrypt,
3817 }
3818 },
3819 {
3820 	.cra_name = "ecb(des)",
3821 	.cra_driver_name = "ecb-des-sep",
3822 .cra_priority = 100,
3823 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3824 .cra_blocksize = DES_BLOCK_SIZE,
3825 .cra_ctxsize = sizeof(struct sep_system_ctx),
3826 .cra_alignmask = 0,
3827 .cra_type = &crypto_ablkcipher_type,
3828 .cra_module = THIS_MODULE,
3829 .cra_init = sep_crypto_init,
3830 .cra_exit = sep_crypto_exit,
3831 .cra_u.ablkcipher = {
3832 .min_keysize = DES_KEY_SIZE,
3833 .max_keysize = DES_KEY_SIZE,
3834 .setkey = sep_des_setkey,
3835 .encrypt = sep_des_ebc_encrypt,
3836 .decrypt = sep_des_ebc_decrypt,
3837 }
3838 },
3839 {
3840 .cra_name = "cbc(des)",
3841 .cra_driver_name = "cbc-des-sep",
3842 .cra_priority = 100,
3843 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3844 .cra_blocksize = DES_BLOCK_SIZE,
3845 .cra_ctxsize = sizeof(struct sep_system_ctx),
3846 .cra_alignmask = 0,
3847 .cra_type = &crypto_ablkcipher_type,
3848 .cra_module = THIS_MODULE,
3849 .cra_init = sep_crypto_init,
3850 .cra_exit = sep_crypto_exit,
3851 .cra_u.ablkcipher = {
3852 .min_keysize = DES_KEY_SIZE,
3853 .max_keysize = DES_KEY_SIZE,
3854 .setkey = sep_des_setkey,
3855 .encrypt = sep_des_cbc_encrypt,
3856 .ivsize = DES_BLOCK_SIZE,
3857 .decrypt = sep_des_cbc_decrypt,
3858 }
3859 },
3860 {
3861 	.cra_name = "ecb(des3_ede)",
3862 	.cra_driver_name = "ecb-des3-ede-sep",
3863 .cra_priority = 100,
3864 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3865 	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3866 .cra_ctxsize = sizeof(struct sep_system_ctx),
3867 .cra_alignmask = 0,
3868 .cra_type = &crypto_ablkcipher_type,
3869 .cra_module = THIS_MODULE,
3870 .cra_init = sep_crypto_init,
3871 .cra_exit = sep_crypto_exit,
3872 .cra_u.ablkcipher = {
3873 .min_keysize = DES3_EDE_KEY_SIZE,
3874 .max_keysize = DES3_EDE_KEY_SIZE,
3875 .setkey = sep_des_setkey,
3876 .encrypt = sep_des_ebc_encrypt,
3877 .decrypt = sep_des_ebc_decrypt,
3878 }
3879 },
3880 {
3881 	.cra_name = "cbc(des3_ede)",
3882 	.cra_driver_name = "cbc-des3-ede-sep",
3883 .cra_priority = 100,
3884 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3885 	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3886 .cra_ctxsize = sizeof(struct sep_system_ctx),
3887 .cra_alignmask = 0,
3888 .cra_type = &crypto_ablkcipher_type,
3889 .cra_module = THIS_MODULE,
3890 .cra_init = sep_crypto_init,
3891 .cra_exit = sep_crypto_exit,
3892 .cra_u.ablkcipher = {
3893 .min_keysize = DES3_EDE_KEY_SIZE,
3894 .max_keysize = DES3_EDE_KEY_SIZE,
3895 .setkey = sep_des_setkey,
3896 		.encrypt = sep_des_cbc_encrypt,
		.ivsize = DES3_EDE_BLOCK_SIZE,
3897 		.decrypt = sep_des_cbc_decrypt,
3898 }
3899 }
3900 };
3901
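/*
 * sep_crypto_setup - initialize the crypto glue layer: set up the finish
 * tasklet, the request queue and a single-threaded workqueue, then register
 * the hash and cipher transforms. On any registration failure, everything
 * registered so far is unregistered and the workqueue is destroyed before
 * the error is returned.
 */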
3902 int sep_crypto_setup(void)
3903 {
3904 int err, i, j, k;
3905
3906 tasklet_init(&sep_dev->finish_tasklet, sep_finish,
3907 (unsigned long)sep_dev);
3908
3909 crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);
3910
3911 sep_dev->workqueue = create_singlethread_workqueue(
3912 "sep_crypto_workqueue");
3913 if (!sep_dev->workqueue) {
3914 		dev_warn(&sep_dev->pdev->dev, "can't create workqueue\n");
3915 return -ENOMEM;
3916 }
3917
3918 spin_lock_init(&queue_lock);
3919
3920 err = 0;
3921 for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
3922 err = crypto_register_ahash(&hash_algs[i]);
3923 if (err)
3924 goto err_algs;
3925 }
3926
3927 err = 0;
3928 for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
3929 err = crypto_register_alg(&crypto_algs[j]);
3930 if (err)
3931 goto err_crypto_algs;
3932 }
3933
3934 return err;
3935
3936 err_algs:
3937 for (k = 0; k < i; k++)
3938 crypto_unregister_ahash(&hash_algs[k]);
3939 destroy_workqueue(sep_dev->workqueue);
3940 return err;
3941
3942 err_crypto_algs:
3943 for (k = 0; k < j; k++)
3944 crypto_unregister_alg(&crypto_algs[k]);
3945 goto err_algs;
3946 }
3947
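/*
 * sep_crypto_takedown - reverse of sep_crypto_setup(): unregister all
 * transforms, destroy the workqueue and kill the finish tasklet.
 */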
3948 void sep_crypto_takedown(void)
3949 {
3951 int i;
3952
3953 for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
3954 crypto_unregister_ahash(&hash_algs[i]);
3955 for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
3956 crypto_unregister_alg(&crypto_algs[i]);
3957
3958 destroy_workqueue(sep_dev->workqueue);
3959 tasklet_kill(&sep_dev->finish_tasklet);
3960 }
3961
3962 #endif