Fix common misspellings
[deliverable/linux.git] / drivers / staging / sep / sep_driver.c
1 /*
2 *
3 * sep_driver.c - Security Processor Driver main group of functions
4 *
5 * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009,2010 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * CONTACTS:
22 *
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 *
31 */
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/miscdevice.h>
35 #include <linux/fs.h>
36 #include <linux/cdev.h>
37 #include <linux/kdev_t.h>
38 #include <linux/mutex.h>
39 #include <linux/sched.h>
40 #include <linux/mm.h>
41 #include <linux/poll.h>
42 #include <linux/wait.h>
43 #include <linux/pci.h>
44 #include <linux/firmware.h>
45 #include <linux/slab.h>
46 #include <linux/ioctl.h>
47 #include <asm/current.h>
48 #include <linux/ioport.h>
49 #include <linux/io.h>
50 #include <linux/interrupt.h>
51 #include <linux/pagemap.h>
52 #include <asm/cacheflush.h>
53 #include <linux/sched.h>
54 #include <linux/delay.h>
55 #include <linux/jiffies.h>
56 #include <linux/rar_register.h>
57
58 #include "../memrar/memrar.h"
59
60 #include "sep_driver_hw_defs.h"
61 #include "sep_driver_config.h"
62 #include "sep_driver_api.h"
63 #include "sep_dev.h"
64
65 /*----------------------------------------
66 DEFINES
67 -----------------------------------------*/
68
69 #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
70
71 /*--------------------------------------------
72 GLOBAL variables
73 --------------------------------------------*/
74
75 /* Keep this a single static object for now to keep the conversion easy */
76
77 static struct sep_device *sep_dev;
78
79 /**
80 * sep_dump_message - dump the message that is pending
81 * @sep: SEP device
82 */
83 static void sep_dump_message(struct sep_device *sep)
84 {
85 int count;
86 u32 *p = sep->shared_addr;
87 for (count = 0; count < 12 * 4; count += 4)
88 dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
89 count, *p++);
90 }
91
92 /**
93 * sep_map_and_alloc_shared_area - allocate shared block
94 * @sep: security processor
95 * @size: size of shared area
96 */
97 static int sep_map_and_alloc_shared_area(struct sep_device *sep)
98 {
99 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
100 sep->shared_size,
101 &sep->shared_bus, GFP_KERNEL);
102
103 if (!sep->shared_addr) {
104 dev_warn(&sep->pdev->dev,
105 "shared memory dma_alloc_coherent failed\n");
106 return -ENOMEM;
107 }
108 dev_dbg(&sep->pdev->dev,
109 "shared_addr %zx bytes @%p (bus %llx)\n",
110 sep->shared_size, sep->shared_addr,
111 (unsigned long long)sep->shared_bus);
112 return 0;
113 }
114
115 /**
116 * sep_unmap_and_free_shared_area - free shared block
117 * @sep: security processor
118 */
119 static void sep_unmap_and_free_shared_area(struct sep_device *sep)
120 {
121 dma_free_coherent(&sep->pdev->dev, sep->shared_size,
122 sep->shared_addr, sep->shared_bus);
123 }
124
125 /**
126 * sep_shared_bus_to_virt - convert bus/virt addresses
127 * @sep: pointer to struct sep_device
128 * @bus_address: address to convert
129 *
130 * Returns virtual address inside the shared area according
131 * to the bus address.
132 */
133 static void *sep_shared_bus_to_virt(struct sep_device *sep,
134 dma_addr_t bus_address)
135 {
136 return sep->shared_addr + (bus_address - sep->shared_bus);
137 }
138
139 /**
140 * open function for the singleton driver
141 * @inode_ptr struct inode *
142 * @file_ptr struct file *
143 *
144 * Called when the user opens the singleton device interface
145 */
146 static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
147 {
148 struct sep_device *sep;
149
150 /*
151 * Get the SEP device structure and use it for the
152 * private_data field in filp for other methods
153 */
154 sep = sep_dev;
155
156 file_ptr->private_data = sep;
157
158 if (test_and_set_bit(0, &sep->singleton_access_flag))
159 return -EBUSY;
160 return 0;
161 }
162
163 /**
164 * sep_open - device open method
165 * @inode: inode of SEP device
166 * @filp: file handle to SEP device
167 *
168 * Open method for the SEP device. Called when userspace opens
169 * the SEP device node.
170 *
171 * Returns zero on success otherwise an error code.
172 */
173 static int sep_open(struct inode *inode, struct file *filp)
174 {
175 struct sep_device *sep;
176
177 /*
178 * Get the SEP device structure and use it for the
179 * private_data field in filp for other methods
180 */
181 sep = sep_dev;
182 filp->private_data = sep;
183
184 /* Anyone can open; locking takes place at transaction level */
185 return 0;
186 }
187
188 /**
189 * sep_singleton_release - close a SEP singleton device
190 * @inode: inode of SEP device
191 * @filp: file handle being closed
192 *
193 * Called on the final close of a SEP device. As the open protects against
194 * multiple simultaenous opens that means this method is called when the
195 * final reference to the open handle is dropped.
196 */
197 static int sep_singleton_release(struct inode *inode, struct file *filp)
198 {
199 struct sep_device *sep = filp->private_data;
200
201 clear_bit(0, &sep->singleton_access_flag);
202 return 0;
203 }
204
205 /**
206 * sep_request_daemonopen - request daemon open method
207 * @inode: inode of SEP device
208 * @filp: file handle to SEP device
209 *
210 * Open method for the SEP request daemon. Called when
211 * request daemon in userspace opens the SEP device node.
212 *
213 * Returns zero on success otherwise an error code.
214 */
215 static int sep_request_daemon_open(struct inode *inode, struct file *filp)
216 {
217 struct sep_device *sep = sep_dev;
218 int error = 0;
219
220 filp->private_data = sep;
221
222 /* There is supposed to be only one request daemon */
223 if (test_and_set_bit(0, &sep->request_daemon_open))
224 error = -EBUSY;
225 return error;
226 }
227
228 /**
229 * sep_request_daemon_release - close a SEP daemon
230 * @inode: inode of SEP device
231 * @filp: file handle being closed
232 *
233 * Called on the final close of a SEP daemon.
234 */
235 static int sep_request_daemon_release(struct inode *inode, struct file *filp)
236 {
237 struct sep_device *sep = filp->private_data;
238
239 dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n",
240 current->pid);
241
242 /* Clear the request_daemon_open flag */
243 clear_bit(0, &sep->request_daemon_open);
244 return 0;
245 }
246
247 /**
248 * sep_req_daemon_send_reply_command_handler - poke the SEP
249 * @sep: struct sep_device *
250 *
251 * This function raises interrupt to SEPm that signals that is has a
252 * new command from HOST
253 */
254 static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
255 {
256 unsigned long lck_flags;
257
258 sep_dump_message(sep);
259
260 /* Counters are lockable region */
261 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
262 sep->send_ct++;
263 sep->reply_ct++;
264
265 /* Send the interrupt to SEP */
266 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
267 sep->send_ct++;
268
269 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
270
271 dev_dbg(&sep->pdev->dev,
272 "sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
273 sep->send_ct, sep->reply_ct);
274
275 return 0;
276 }
277
278
279 /**
280 * sep_free_dma_table_data_handler - free DMA table
281 * @sep: pointere to struct sep_device
282 *
283 * Handles the request to free DMA table for synchronic actions
284 */
285 static int sep_free_dma_table_data_handler(struct sep_device *sep)
286 {
287 int count;
288 int dcb_counter;
289 /* Pointer to the current dma_resource struct */
290 struct sep_dma_resource *dma;
291
292 for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
293 dma = &sep->dma_res_arr[dcb_counter];
294
295 /* Unmap and free input map array */
296 if (dma->in_map_array) {
297 for (count = 0; count < dma->in_num_pages; count++) {
298 dma_unmap_page(&sep->pdev->dev,
299 dma->in_map_array[count].dma_addr,
300 dma->in_map_array[count].size,
301 DMA_TO_DEVICE);
302 }
303 kfree(dma->in_map_array);
304 }
305
306 /* Unmap output map array, DON'T free it yet */
307 if (dma->out_map_array) {
308 for (count = 0; count < dma->out_num_pages; count++) {
309 dma_unmap_page(&sep->pdev->dev,
310 dma->out_map_array[count].dma_addr,
311 dma->out_map_array[count].size,
312 DMA_FROM_DEVICE);
313 }
314 kfree(dma->out_map_array);
315 }
316
317 /* Free page cache for output */
318 if (dma->in_page_array) {
319 for (count = 0; count < dma->in_num_pages; count++) {
320 flush_dcache_page(dma->in_page_array[count]);
321 page_cache_release(dma->in_page_array[count]);
322 }
323 kfree(dma->in_page_array);
324 }
325
326 if (dma->out_page_array) {
327 for (count = 0; count < dma->out_num_pages; count++) {
328 if (!PageReserved(dma->out_page_array[count]))
329 SetPageDirty(dma->out_page_array[count]);
330 flush_dcache_page(dma->out_page_array[count]);
331 page_cache_release(dma->out_page_array[count]);
332 }
333 kfree(dma->out_page_array);
334 }
335
336 /* Reset all the values */
337 dma->in_page_array = NULL;
338 dma->out_page_array = NULL;
339 dma->in_num_pages = 0;
340 dma->out_num_pages = 0;
341 dma->in_map_array = NULL;
342 dma->out_map_array = NULL;
343 dma->in_map_num_entries = 0;
344 dma->out_map_num_entries = 0;
345 }
346
347 sep->nr_dcb_creat = 0;
348 sep->num_lli_tables_created = 0;
349
350 return 0;
351 }
352
353 /**
354 * sep_request_daemon_mmap - maps the shared area to user space
355 * @filp: pointer to struct file
356 * @vma: pointer to vm_area_struct
357 *
358 * Called by the kernel when the daemon attempts an mmap() syscall
359 * using our handle.
360 */
361 static int sep_request_daemon_mmap(struct file *filp,
362 struct vm_area_struct *vma)
363 {
364 struct sep_device *sep = filp->private_data;
365 dma_addr_t bus_address;
366 int error = 0;
367
368 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
369 error = -EINVAL;
370 goto end_function;
371 }
372
373 /* Get physical address */
374 bus_address = sep->shared_bus;
375
376 if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
377 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
378
379 dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
380 error = -EAGAIN;
381 goto end_function;
382 }
383
384 end_function:
385 return error;
386 }
387
388 /**
389 * sep_request_daemon_poll - poll implementation
390 * @sep: struct sep_device * for current SEP device
391 * @filp: struct file * for open file
392 * @wait: poll_table * for poll
393 *
394 * Called when our device is part of a poll() or select() syscall
395 */
396 static unsigned int sep_request_daemon_poll(struct file *filp,
397 poll_table *wait)
398 {
399 u32 mask = 0;
400 /* GPR2 register */
401 u32 retval2;
402 unsigned long lck_flags;
403 struct sep_device *sep = filp->private_data;
404
405 poll_wait(filp, &sep->event_request_daemon, wait);
406
407 dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
408 sep->send_ct, sep->reply_ct);
409
410 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
411 /* Check if the data is ready */
412 if (sep->send_ct == sep->reply_ct) {
413 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
414
415 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
416 dev_dbg(&sep->pdev->dev,
417 "daemon poll: data check (GPR2) is %x\n", retval2);
418
419 /* Check if PRINT request */
420 if ((retval2 >> 30) & 0x1) {
421 dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
422 mask |= POLLIN;
423 goto end_function;
424 }
425 /* Check if NVS request */
426 if (retval2 >> 31) {
427 dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
428 mask |= POLLPRI | POLLWRNORM;
429 }
430 } else {
431 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
432 dev_dbg(&sep->pdev->dev,
433 "daemon poll: no reply received; returning 0\n");
434 mask = 0;
435 }
436 end_function:
437 return mask;
438 }
439
440 /**
441 * sep_release - close a SEP device
442 * @inode: inode of SEP device
443 * @filp: file handle being closed
444 *
445 * Called on the final close of a SEP device.
446 */
447 static int sep_release(struct inode *inode, struct file *filp)
448 {
449 struct sep_device *sep = filp->private_data;
450
451 dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);
452
453 mutex_lock(&sep->sep_mutex);
454 /* Is this the process that has a transaction open?
455 * If so, lets reset pid_doing_transaction to 0 and
456 * clear the in use flags, and then wake up sep_event
457 * so that other processes can do transactions
458 */
459 if (sep->pid_doing_transaction == current->pid) {
460 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
461 clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
462 sep_free_dma_table_data_handler(sep);
463 wake_up(&sep->event);
464 sep->pid_doing_transaction = 0;
465 }
466
467 mutex_unlock(&sep->sep_mutex);
468 return 0;
469 }
470
471 /**
472 * sep_mmap - maps the shared area to user space
473 * @filp: pointer to struct file
474 * @vma: pointer to vm_area_struct
475 *
476 * Called on an mmap of our space via the normal SEP device
477 */
478 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
479 {
480 dma_addr_t bus_addr;
481 struct sep_device *sep = filp->private_data;
482 unsigned long error = 0;
483
484 /* Set the transaction busy (own the device) */
485 wait_event_interruptible(sep->event,
486 test_and_set_bit(SEP_MMAP_LOCK_BIT,
487 &sep->in_use_flags) == 0);
488
489 if (signal_pending(current)) {
490 error = -EINTR;
491 goto end_function_with_error;
492 }
493 /*
494 * The pid_doing_transaction indicates that this process
495 * now owns the facilities to performa a transaction with
496 * the SEP. While this process is performing a transaction,
497 * no other process who has the SEP device open can perform
498 * any transactions. This method allows more than one process
499 * to have the device open at any given time, which provides
500 * finer granularity for device utilization by multiple
501 * processes.
502 */
503 mutex_lock(&sep->sep_mutex);
504 sep->pid_doing_transaction = current->pid;
505 mutex_unlock(&sep->sep_mutex);
506
507 /* Zero the pools and the number of data pool alocation pointers */
508 sep->data_pool_bytes_allocated = 0;
509 sep->num_of_data_allocations = 0;
510
511 /*
512 * Check that the size of the mapped range is as the size of the message
513 * shared area
514 */
515 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
516 error = -EINVAL;
517 goto end_function_with_error;
518 }
519
520 dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);
521
522 /* Get bus address */
523 bus_addr = sep->shared_bus;
524
525 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
526 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
527 dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
528 error = -EAGAIN;
529 goto end_function_with_error;
530 }
531 goto end_function;
532
533 end_function_with_error:
534 /* Clear the bit */
535 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
536 mutex_lock(&sep->sep_mutex);
537 sep->pid_doing_transaction = 0;
538 mutex_unlock(&sep->sep_mutex);
539
540 /* Raise event for stuck contextes */
541
542 wake_up(&sep->event);
543
544 end_function:
545 return error;
546 }
547
548 /**
549 * sep_poll - poll handler
550 * @filp: pointer to struct file
551 * @wait: pointer to poll_table
552 *
553 * Called by the OS when the kernel is asked to do a poll on
554 * a SEP file handle.
555 */
556 static unsigned int sep_poll(struct file *filp, poll_table *wait)
557 {
558 u32 mask = 0;
559 u32 retval = 0;
560 u32 retval2 = 0;
561 unsigned long lck_flags;
562
563 struct sep_device *sep = filp->private_data;
564
565 /* Am I the process that owns the transaction? */
566 mutex_lock(&sep->sep_mutex);
567 if (current->pid != sep->pid_doing_transaction) {
568 dev_dbg(&sep->pdev->dev, "poll; wrong pid\n");
569 mask = POLLERR;
570 mutex_unlock(&sep->sep_mutex);
571 goto end_function;
572 }
573 mutex_unlock(&sep->sep_mutex);
574
575 /* Check if send command or send_reply were activated previously */
576 if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
577 mask = POLLERR;
578 goto end_function;
579 }
580
581 /* Add the event to the polling wait table */
582 dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");
583
584 poll_wait(filp, &sep->event, wait);
585
586 dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
587 sep->send_ct, sep->reply_ct);
588
589 /* Check if error occurred during poll */
590 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
591 if (retval2 != 0x0) {
592 dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
593 mask |= POLLERR;
594 goto end_function;
595 }
596
597 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
598
599 if (sep->send_ct == sep->reply_ct) {
600 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
601 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
602 dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
603 retval);
604
605 /* Check if printf request */
606 if ((retval >> 30) & 0x1) {
607 dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
608 wake_up(&sep->event_request_daemon);
609 goto end_function;
610 }
611
612 /* Check if the this is SEP reply or request */
613 if (retval >> 31) {
614 dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
615 wake_up(&sep->event_request_daemon);
616 } else {
617 dev_dbg(&sep->pdev->dev, "poll: normal return\n");
618 /* In case it is again by send_reply_comand */
619 clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
620 sep_dump_message(sep);
621 dev_dbg(&sep->pdev->dev,
622 "poll; SEP reply POLLIN | POLLRDNORM\n");
623 mask |= POLLIN | POLLRDNORM;
624 }
625 } else {
626 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
627 dev_dbg(&sep->pdev->dev,
628 "poll; no reply received; returning mask of 0\n");
629 mask = 0;
630 }
631
632 end_function:
633 return mask;
634 }
635
636 /**
637 * sep_time_address - address in SEP memory of time
638 * @sep: SEP device we want the address from
639 *
640 * Return the address of the two dwords in memory used for time
641 * setting.
642 */
643 static u32 *sep_time_address(struct sep_device *sep)
644 {
645 return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
646 }
647
648 /**
649 * sep_set_time - set the SEP time
650 * @sep: the SEP we are setting the time for
651 *
652 * Calculates time and sets it at the predefined address.
653 * Called with the SEP mutex held.
654 */
655 static unsigned long sep_set_time(struct sep_device *sep)
656 {
657 struct timeval time;
658 u32 *time_addr; /* Address of time as seen by the kernel */
659
660
661 do_gettimeofday(&time);
662
663 /* Set value in the SYSTEM MEMORY offset */
664 time_addr = sep_time_address(sep);
665
666 time_addr[0] = SEP_TIME_VAL_TOKEN;
667 time_addr[1] = time.tv_sec;
668
669 dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
670 dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
671 dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);
672
673 return time.tv_sec;
674 }
675
676 /**
677 * sep_set_caller_id_handler - insert caller id entry
678 * @sep: SEP device
679 * @arg: pointer to struct caller_id_struct
680 *
681 * Inserts the data into the caller id table. Note that this function
682 * falls under the ioctl lock
683 */
684 static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg)
685 {
686 void __user *hash;
687 int error = 0;
688 int i;
689 struct caller_id_struct command_args;
690
691 for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
692 if (sep->caller_id_table[i].pid == 0)
693 break;
694 }
695
696 if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
697 dev_dbg(&sep->pdev->dev, "no more caller id entries left\n");
698 dev_dbg(&sep->pdev->dev, "maximum number is %d\n",
699 SEP_CALLER_ID_TABLE_NUM_ENTRIES);
700 error = -EUSERS;
701 goto end_function;
702 }
703
704 /* Copy the data */
705 if (copy_from_user(&command_args, (void __user *)arg,
706 sizeof(command_args))) {
707 error = -EFAULT;
708 goto end_function;
709 }
710
711 hash = (void __user *)(unsigned long)command_args.callerIdAddress;
712
713 if (!command_args.pid || !command_args.callerIdSizeInBytes) {
714 error = -EINVAL;
715 goto end_function;
716 }
717
718 dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
719 dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
720 command_args.callerIdSizeInBytes);
721
722 if (command_args.callerIdSizeInBytes >
723 SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
724 error = -EMSGSIZE;
725 goto end_function;
726 }
727
728 sep->caller_id_table[i].pid = command_args.pid;
729
730 if (copy_from_user(sep->caller_id_table[i].callerIdHash,
731 hash, command_args.callerIdSizeInBytes))
732 error = -EFAULT;
733 end_function:
734 return error;
735 }
736
737 /**
738 * sep_set_current_caller_id - set the caller id
739 * @sep: pointer to struct_sep_device
740 *
741 * Set the caller ID (if it exists) to the SEP. Note that this
742 * function falls under the ioctl lock
743 */
744 static int sep_set_current_caller_id(struct sep_device *sep)
745 {
746 int i;
747 u32 *hash_buf_ptr;
748
749 /* Zero the previous value */
750 memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
751 0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
752
753 for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
754 if (sep->caller_id_table[i].pid == current->pid) {
755 dev_dbg(&sep->pdev->dev, "Caller Id found\n");
756
757 memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
758 (void *)(sep->caller_id_table[i].callerIdHash),
759 SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
760 break;
761 }
762 }
763 /* Ensure data is in little endian */
764 hash_buf_ptr = (u32 *)sep->shared_addr +
765 SEP_CALLER_ID_OFFSET_BYTES;
766
767 for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++)
768 hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]);
769
770 return 0;
771 }
772
773 /**
774 * sep_send_command_handler - kick off a command
775 * @sep: SEP being signalled
776 *
777 * This function raises interrupt to SEP that signals that is has a new
778 * command from the host
779 *
780 * Note that this function does fall under the ioctl lock
781 */
782 static int sep_send_command_handler(struct sep_device *sep)
783 {
784 unsigned long lck_flags;
785 int error = 0;
786
787 if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
788 error = -EPROTO;
789 goto end_function;
790 }
791 sep_set_time(sep);
792
793 sep_set_current_caller_id(sep);
794
795 sep_dump_message(sep);
796
797 /* Update counter */
798 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
799 sep->send_ct++;
800 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
801
802 dev_dbg(&sep->pdev->dev,
803 "sep_send_command_handler send_ct %lx reply_ct %lx\n",
804 sep->send_ct, sep->reply_ct);
805
806 /* Send interrupt to SEP */
807 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
808
809 end_function:
810 return error;
811 }
812
813 /**
814 * sep_allocate_data_pool_memory_handler -allocate pool memory
815 * @sep: pointer to struct sep_device
816 * @arg: pointer to struct alloc_struct
817 *
818 * This function handles the allocate data pool memory request
819 * This function returns calculates the bus address of the
820 * allocated memory, and the offset of this area from the mapped address.
821 * Therefore, the FVOs in user space can calculate the exact virtual
822 * address of this allocated memory
823 */
824 static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
825 unsigned long arg)
826 {
827 int error = 0;
828 struct alloc_struct command_args;
829
830 /* Holds the allocated buffer address in the system memory pool */
831 u32 *token_addr;
832
833 if (copy_from_user(&command_args, (void __user *)arg,
834 sizeof(struct alloc_struct))) {
835 error = -EFAULT;
836 goto end_function;
837 }
838
839 /* Allocate memory */
840 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
841 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
842 error = -ENOMEM;
843 goto end_function;
844 }
845
846 dev_dbg(&sep->pdev->dev,
847 "data pool bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
848 dev_dbg(&sep->pdev->dev,
849 "offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
850 /* Set the virtual and bus address */
851 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
852 sep->data_pool_bytes_allocated;
853
854 /* Place in the shared area that is known by the SEP */
855 token_addr = (u32 *)(sep->shared_addr +
856 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
857 (sep->num_of_data_allocations)*2*sizeof(u32));
858
859 token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
860 token_addr[1] = (u32)sep->shared_bus +
861 SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
862 sep->data_pool_bytes_allocated;
863
864 /* Write the memory back to the user space */
865 error = copy_to_user((void *)arg, (void *)&command_args,
866 sizeof(struct alloc_struct));
867 if (error) {
868 error = -EFAULT;
869 goto end_function;
870 }
871
872 /* Update the allocation */
873 sep->data_pool_bytes_allocated += command_args.num_bytes;
874 sep->num_of_data_allocations += 1;
875
876 end_function:
877 return error;
878 }
879
880 /**
881 * sep_lock_kernel_pages - map kernel pages for DMA
882 * @sep: pointer to struct sep_device
883 * @kernel_virt_addr: address of data buffer in kernel
884 * @data_size: size of data
885 * @lli_array_ptr: lli array
886 * @in_out_flag: input into device or output from device
887 *
888 * This function locks all the physical pages of the kernel virtual buffer
889 * and construct a basic lli array, where each entry holds the physical
890 * page address and the size that application data holds in this page
891 * This function is used only during kernel crypto mod calls from within
892 * the kernel (when ioctl is not used)
893 */
894 static int sep_lock_kernel_pages(struct sep_device *sep,
895 unsigned long kernel_virt_addr,
896 u32 data_size,
897 struct sep_lli_entry **lli_array_ptr,
898 int in_out_flag)
899
900 {
901 int error = 0;
902 /* Array of lli */
903 struct sep_lli_entry *lli_array;
904 /* Map array */
905 struct sep_dma_map *map_array;
906
907 dev_dbg(&sep->pdev->dev, "lock kernel pages kernel_virt_addr is %08lx\n",
908 (unsigned long)kernel_virt_addr);
909 dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
910
911 lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
912 if (!lli_array) {
913 error = -ENOMEM;
914 goto end_function;
915 }
916 map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
917 if (!map_array) {
918 error = -ENOMEM;
919 goto end_function_with_error;
920 }
921
922 map_array[0].dma_addr =
923 dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
924 data_size, DMA_BIDIRECTIONAL);
925 map_array[0].size = data_size;
926
927
928 /*
929 * Set the start address of the first page - app data may start not at
930 * the beginning of the page
931 */
932 lli_array[0].bus_address = (u32)map_array[0].dma_addr;
933 lli_array[0].block_size = map_array[0].size;
934
935 dev_dbg(&sep->pdev->dev,
936 "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
937 (unsigned long)lli_array[0].bus_address,
938 lli_array[0].block_size);
939
940 /* Set the output parameters */
941 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
942 *lli_array_ptr = lli_array;
943 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
944 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
945 sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
946 sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
947 } else {
948 *lli_array_ptr = lli_array;
949 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
950 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
951 sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
952 sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
953 }
954 goto end_function;
955
956 end_function_with_error:
957 kfree(lli_array);
958
959 end_function:
960 return error;
961 }
962
963 /**
964 * sep_lock_user_pages - lock and map user pages for DMA
965 * @sep: pointer to struct sep_device
966 * @app_virt_addr: user memory data buffer
967 * @data_size: size of data buffer
968 * @lli_array_ptr: lli array
969 * @in_out_flag: input or output to device
970 *
971 * This function locks all the physical pages of the application
972 * virtual buffer and construct a basic lli array, where each entry
973 * holds the physical page address and the size that application
974 * data holds in this physical pages
975 */
976 static int sep_lock_user_pages(struct sep_device *sep,
977 u32 app_virt_addr,
978 u32 data_size,
979 struct sep_lli_entry **lli_array_ptr,
980 int in_out_flag)
981
982 {
983 int error = 0;
984 u32 count;
985 int result;
986 /* The the page of the end address of the user space buffer */
987 u32 end_page;
988 /* The page of the start address of the user space buffer */
989 u32 start_page;
990 /* The range in pages */
991 u32 num_pages;
992 /* Array of pointers to page */
993 struct page **page_array;
994 /* Array of lli */
995 struct sep_lli_entry *lli_array;
996 /* Map array */
997 struct sep_dma_map *map_array;
998 /* Direction of the DMA mapping for locked pages */
999 enum dma_data_direction dir;
1000
1001 /* Set start and end pages and num pages */
1002 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1003 start_page = app_virt_addr >> PAGE_SHIFT;
1004 num_pages = end_page - start_page + 1;
1005
1006 dev_dbg(&sep->pdev->dev, "lock user pages app_virt_addr is %x\n", app_virt_addr);
1007 dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
1008 dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
1009 dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
1010 dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);
1011
1012 /* Allocate array of pages structure pointers */
1013 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
1014 if (!page_array) {
1015 error = -ENOMEM;
1016 goto end_function;
1017 }
1018 map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
1019 if (!map_array) {
1020 dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
1021 error = -ENOMEM;
1022 goto end_function_with_error1;
1023 }
1024
1025 lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
1026 GFP_ATOMIC);
1027
1028 if (!lli_array) {
1029 dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
1030 error = -ENOMEM;
1031 goto end_function_with_error2;
1032 }
1033
1034 /* Convert the application virtual address into a set of physical */
1035 down_read(&current->mm->mmap_sem);
1036 result = get_user_pages(current, current->mm, app_virt_addr,
1037 num_pages,
1038 ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
1039 0, page_array, NULL);
1040
1041 up_read(&current->mm->mmap_sem);
1042
1043 /* Check the number of pages locked - if not all then exit with error */
1044 if (result != num_pages) {
1045 dev_warn(&sep->pdev->dev,
1046 "not all pages locked by get_user_pages\n");
1047 error = -ENOMEM;
1048 goto end_function_with_error3;
1049 }
1050
1051 dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");
1052
1053 /* Set direction */
1054 if (in_out_flag == SEP_DRIVER_IN_FLAG)
1055 dir = DMA_TO_DEVICE;
1056 else
1057 dir = DMA_FROM_DEVICE;
1058
1059 /*
1060 * Fill the array using page array data and
1061 * map the pages - this action will also flush the cache as needed
1062 */
1063 for (count = 0; count < num_pages; count++) {
1064 /* Fill the map array */
1065 map_array[count].dma_addr =
1066 dma_map_page(&sep->pdev->dev, page_array[count],
1067 0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);
1068
1069 map_array[count].size = PAGE_SIZE;
1070
1071 /* Fill the lli array entry */
1072 lli_array[count].bus_address = (u32)map_array[count].dma_addr;
1073 lli_array[count].block_size = PAGE_SIZE;
1074
1075 dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
1076 count, (unsigned long)lli_array[count].bus_address,
1077 count, lli_array[count].block_size);
1078 }
1079
1080 /* Check the offset for the first page */
1081 lli_array[0].bus_address =
1082 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1083
1084 /* Check that not all the data is in the first page only */
1085 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1086 lli_array[0].block_size = data_size;
1087 else
1088 lli_array[0].block_size =
1089 PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1090
1091 dev_dbg(&sep->pdev->dev,
1092 "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
1093 (unsigned long)lli_array[count].bus_address,
1094 lli_array[count].block_size);
1095
1096 /* Check the size of the last page */
1097 if (num_pages > 1) {
1098 lli_array[num_pages - 1].block_size =
1099 (app_virt_addr + data_size) & (~PAGE_MASK);
1100
1101 dev_warn(&sep->pdev->dev,
1102 "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
1103 num_pages - 1,
1104 (unsigned long)lli_array[count].bus_address,
1105 num_pages - 1,
1106 lli_array[count].block_size);
1107 }
1108
1109 /* Set output params according to the in_out flag */
1110 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1111 *lli_array_ptr = lli_array;
1112 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
1113 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
1114 sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
1115 sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
1116 num_pages;
1117 } else {
1118 *lli_array_ptr = lli_array;
1119 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
1120 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
1121 page_array;
1122 sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
1123 sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
1124 num_pages;
1125 }
1126 goto end_function;
1127
1128 end_function_with_error3:
1129 /* Free lli array */
1130 kfree(lli_array);
1131
1132 end_function_with_error2:
1133 kfree(map_array);
1134
1135 end_function_with_error1:
1136 /* Free page array */
1137 kfree(page_array);
1138
1139 end_function:
1140 return error;
1141 }
1142
1143 /**
1144 * u32 sep_calculate_lli_table_max_size - size the LLI table
1145 * @sep: pointer to struct sep_device
1146 * @lli_in_array_ptr
1147 * @num_array_entries
1148 * @last_table_flag
1149 *
1150 * This function calculates the size of data that can be inserted into
1151 * the lli table from this array, such that either the table is full
1152 * (all entries are entered), or there are no more entries in the
1153 * lli array
1154 */
1155 static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
1156 struct sep_lli_entry *lli_in_array_ptr,
1157 u32 num_array_entries,
1158 u32 *last_table_flag)
1159 {
1160 u32 counter;
1161 /* Table data size */
1162 u32 table_data_size = 0;
1163 /* Data size for the next table */
1164 u32 next_table_data_size;
1165
1166 *last_table_flag = 0;
1167
1168 /*
1169 * Calculate the data in the out lli table till we fill the whole
1170 * table or till the data has ended
1171 */
1172 for (counter = 0;
1173 (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
1174 (counter < num_array_entries); counter++)
1175 table_data_size += lli_in_array_ptr[counter].block_size;
1176
1177 /*
1178 * Check if we reached the last entry,
1179 * meaning this ia the last table to build,
1180 * and no need to check the block alignment
1181 */
1182 if (counter == num_array_entries) {
1183 /* Set the last table flag */
1184 *last_table_flag = 1;
1185 goto end_function;
1186 }
1187
1188 /*
1189 * Calculate the data size of the next table.
1190 * Stop if no entries left or if data size is more the DMA restriction
1191 */
1192 next_table_data_size = 0;
1193 for (; counter < num_array_entries; counter++) {
1194 next_table_data_size += lli_in_array_ptr[counter].block_size;
1195 if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1196 break;
1197 }
1198
1199 /*
1200 * Check if the next table data size is less then DMA rstriction.
1201 * if it is - recalculate the current table size, so that the next
1202 * table data size will be adaquete for DMA
1203 */
1204 if (next_table_data_size &&
1205 next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1206
1207 table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
1208 next_table_data_size);
1209
1210 end_function:
1211 return table_data_size;
1212 }
1213
1214 /**
1215 * sep_build_lli_table - build an lli array for the given table
1216 * @sep: pointer to struct sep_device
1217 * @lli_array_ptr: pointer to lli array
1218 * @lli_table_ptr: pointer to lli table
1219 * @num_processed_entries_ptr: pointer to number of entries
1220 * @num_table_entries_ptr: pointer to number of tables
1221 * @table_data_size: total data size
1222 *
1223 * Builds ant lli table from the lli_array according to
1224 * the given size of data
1225 */
1226 static void sep_build_lli_table(struct sep_device *sep,
1227 struct sep_lli_entry *lli_array_ptr,
1228 struct sep_lli_entry *lli_table_ptr,
1229 u32 *num_processed_entries_ptr,
1230 u32 *num_table_entries_ptr,
1231 u32 table_data_size)
1232 {
1233 /* Current table data size */
1234 u32 curr_table_data_size;
1235 /* Counter of lli array entry */
1236 u32 array_counter;
1237
1238 /* Init currrent table data size and lli array entry counter */
1239 curr_table_data_size = 0;
1240 array_counter = 0;
1241 *num_table_entries_ptr = 1;
1242
1243 dev_dbg(&sep->pdev->dev, "build lli table table_data_size is %x\n", table_data_size);
1244
1245 /* Fill the table till table size reaches the needed amount */
1246 while (curr_table_data_size < table_data_size) {
1247 /* Update the number of entries in table */
1248 (*num_table_entries_ptr)++;
1249
1250 lli_table_ptr->bus_address =
1251 cpu_to_le32(lli_array_ptr[array_counter].bus_address);
1252
1253 lli_table_ptr->block_size =
1254 cpu_to_le32(lli_array_ptr[array_counter].block_size);
1255
1256 curr_table_data_size += lli_array_ptr[array_counter].block_size;
1257
1258 dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
1259 lli_table_ptr);
1260 dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
1261 (unsigned long)lli_table_ptr->bus_address);
1262 dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
1263 lli_table_ptr->block_size);
1264
1265 /* Check for overflow of the table data */
1266 if (curr_table_data_size > table_data_size) {
1267 dev_dbg(&sep->pdev->dev,
1268 "curr_table_data_size too large\n");
1269
1270 /* Update the size of block in the table */
1271 lli_table_ptr->block_size -=
1272 cpu_to_le32((curr_table_data_size - table_data_size));
1273
1274 /* Update the physical address in the lli array */
1275 lli_array_ptr[array_counter].bus_address +=
1276 cpu_to_le32(lli_table_ptr->block_size);
1277
1278 /* Update the block size left in the lli array */
1279 lli_array_ptr[array_counter].block_size =
1280 (curr_table_data_size - table_data_size);
1281 } else
1282 /* Advance to the next entry in the lli_array */
1283 array_counter++;
1284
1285 dev_dbg(&sep->pdev->dev,
1286 "lli_table_ptr->bus_address is %08lx\n",
1287 (unsigned long)lli_table_ptr->bus_address);
1288 dev_dbg(&sep->pdev->dev,
1289 "lli_table_ptr->block_size is %x\n",
1290 lli_table_ptr->block_size);
1291
1292 /* Move to the next entry in table */
1293 lli_table_ptr++;
1294 }
1295
1296 /* Set the info entry to default */
1297 lli_table_ptr->bus_address = 0xffffffff;
1298 lli_table_ptr->block_size = 0;
1299
1300 /* Set the output parameter */
1301 *num_processed_entries_ptr += array_counter;
1302
1303 }
1304
1305 /**
1306 * sep_shared_area_virt_to_bus - map shared area to bus address
1307 * @sep: pointer to struct sep_device
1308 * @virt_address: virtual address to convert
1309 *
1310 * This functions returns the physical address inside shared area according
1311 * to the virtual address. It can be either on the externa RAM device
1312 * (ioremapped), or on the system RAM
1313 * This implementation is for the external RAM
1314 */
1315 static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1316 void *virt_address)
1317 {
1318 dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
1319 dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
1320 (unsigned long)
1321 sep->shared_bus + (virt_address - sep->shared_addr));
1322
1323 return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
1324 }
1325
1326 /**
1327 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1328 * @sep: pointer to struct sep_device
1329 * @bus_address: bus address to convert
1330 *
1331 * This functions returns the virtual address inside shared area
1332 * according to the physical address. It can be either on the
1333 * externa RAM device (ioremapped), or on the system RAM
1334 * This implementation is for the external RAM
1335 */
1336 static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1337 dma_addr_t bus_address)
1338 {
1339 dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n",
1340 (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
1341 (size_t)(bus_address - sep->shared_bus)));
1342
1343 return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
1344 }
1345
1346 /**
1347 * sep_debug_print_lli_tables - dump LLI table
1348 * @sep: pointer to struct sep_device
1349 * @lli_table_ptr: pointer to sep_lli_entry
1350 * @num_table_entries: number of entries
1351 * @table_data_size: total data size
1352 *
1353 * Walk the the list of the print created tables and print all the data
1354 */
1355 static void sep_debug_print_lli_tables(struct sep_device *sep,
1356 struct sep_lli_entry *lli_table_ptr,
1357 unsigned long num_table_entries,
1358 unsigned long table_data_size)
1359 {
1360 unsigned long table_count = 1;
1361 unsigned long entries_count = 0;
1362
1363 dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");
1364
1365 while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
1366 dev_dbg(&sep->pdev->dev,
1367 "lli table %08lx, table_data_size is %lu\n",
1368 table_count, table_data_size);
1369 dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
1370 num_table_entries);
1371
1372 /* Print entries of the table (without info entry) */
1373 for (entries_count = 0; entries_count < num_table_entries;
1374 entries_count++, lli_table_ptr++) {
1375
1376 dev_dbg(&sep->pdev->dev,
1377 "lli_table_ptr address is %08lx\n",
1378 (unsigned long) lli_table_ptr);
1379
1380 dev_dbg(&sep->pdev->dev,
1381 "phys address is %08lx block size is %x\n",
1382 (unsigned long)lli_table_ptr->bus_address,
1383 lli_table_ptr->block_size);
1384 }
1385 /* Point to the info entry */
1386 lli_table_ptr--;
1387
1388 dev_dbg(&sep->pdev->dev,
1389 "phys lli_table_ptr->block_size is %x\n",
1390 lli_table_ptr->block_size);
1391
1392 dev_dbg(&sep->pdev->dev,
1393 "phys lli_table_ptr->physical_address is %08lu\n",
1394 (unsigned long)lli_table_ptr->bus_address);
1395
1396
1397 table_data_size = lli_table_ptr->block_size & 0xffffff;
1398 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1399
1400 dev_dbg(&sep->pdev->dev,
1401 "phys table_data_size is %lu num_table_entries is"
1402 " %lu bus_address is%lu\n", table_data_size,
1403 num_table_entries, (unsigned long)lli_table_ptr->bus_address);
1404
1405 if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
1406 lli_table_ptr = (struct sep_lli_entry *)
1407 sep_shared_bus_to_virt(sep,
1408 (unsigned long)lli_table_ptr->bus_address);
1409
1410 table_count++;
1411 }
1412 dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
1413 }
1414
1415
1416 /**
1417 * sep_prepare_empty_lli_table - create a blank LLI table
1418 * @sep: pointer to struct sep_device
1419 * @lli_table_addr_ptr: pointer to lli table
1420 * @num_entries_ptr: pointer to number of entries
1421 * @table_data_size_ptr: point to table data size
1422 *
1423 * This function creates empty lli tables when there is no data
1424 */
1425 static void sep_prepare_empty_lli_table(struct sep_device *sep,
1426 dma_addr_t *lli_table_addr_ptr,
1427 u32 *num_entries_ptr,
1428 u32 *table_data_size_ptr)
1429 {
1430 struct sep_lli_entry *lli_table_ptr;
1431
1432 /* Find the area for new table */
1433 lli_table_ptr =
1434 (struct sep_lli_entry *)(sep->shared_addr +
1435 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1436 sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1437 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1438
1439 lli_table_ptr->bus_address = 0;
1440 lli_table_ptr->block_size = 0;
1441
1442 lli_table_ptr++;
1443 lli_table_ptr->bus_address = 0xFFFFFFFF;
1444 lli_table_ptr->block_size = 0;
1445
1446 /* Set the output parameter value */
1447 *lli_table_addr_ptr = sep->shared_bus +
1448 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1449 sep->num_lli_tables_created *
1450 sizeof(struct sep_lli_entry) *
1451 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1452
1453 /* Set the num of entries and table data size for empty table */
1454 *num_entries_ptr = 2;
1455 *table_data_size_ptr = 0;
1456
1457 /* Update the number of created tables */
1458 sep->num_lli_tables_created++;
1459 }
1460
1461 /**
1462 * sep_prepare_input_dma_table - prepare input DMA mappings
1463 * @sep: pointer to struct sep_device
1464 * @data_size:
1465 * @block_size:
1466 * @lli_table_ptr:
1467 * @num_entries_ptr:
1468 * @table_data_size_ptr:
1469 * @is_kva: set for kernel data (kernel cryptio call)
1470 *
1471 * This function prepares only input DMA table for synhronic symmetric
1472 * operations (HASH)
1473 * Note that all bus addresses that are passed to the SEP
1474 * are in 32 bit format; the SEP is a 32 bit device
1475 */
1476 static int sep_prepare_input_dma_table(struct sep_device *sep,
1477 unsigned long app_virt_addr,
1478 u32 data_size,
1479 u32 block_size,
1480 dma_addr_t *lli_table_ptr,
1481 u32 *num_entries_ptr,
1482 u32 *table_data_size_ptr,
1483 bool is_kva)
1484 {
1485 int error = 0;
1486 /* Pointer to the info entry of the table - the last entry */
1487 struct sep_lli_entry *info_entry_ptr;
1488 /* Array of pointers to page */
1489 struct sep_lli_entry *lli_array_ptr;
1490 /* Points to the first entry to be processed in the lli_in_array */
1491 u32 current_entry = 0;
1492 /* Num entries in the virtual buffer */
1493 u32 sep_lli_entries = 0;
1494 /* Lli table pointer */
1495 struct sep_lli_entry *in_lli_table_ptr;
1496 /* The total data in one table */
1497 u32 table_data_size = 0;
1498 /* Flag for last table */
1499 u32 last_table_flag = 0;
1500 /* Number of entries in lli table */
1501 u32 num_entries_in_table = 0;
1502 /* Next table address */
1503 void *lli_table_alloc_addr = 0;
1504
1505 dev_dbg(&sep->pdev->dev, "prepare intput dma table data_size is %x\n", data_size);
1506 dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);
1507
1508 /* Initialize the pages pointers */
1509 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
1510 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;
1511
1512 /* Set the kernel address for first table to be allocated */
1513 lli_table_alloc_addr = (void *)(sep->shared_addr +
1514 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1515 sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1516 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1517
1518 if (data_size == 0) {
1519 /* Special case - create meptu table - 2 entries, zero data */
1520 sep_prepare_empty_lli_table(sep, lli_table_ptr,
1521 num_entries_ptr, table_data_size_ptr);
1522 goto update_dcb_counter;
1523 }
1524
1525 /* Check if the pages are in Kernel Virtual Address layout */
1526 if (is_kva == true)
1527 /* Lock the pages in the kernel */
1528 error = sep_lock_kernel_pages(sep, app_virt_addr,
1529 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
1530 else
1531 /*
1532 * Lock the pages of the user buffer
1533 * and translate them to pages
1534 */
1535 error = sep_lock_user_pages(sep, app_virt_addr,
1536 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
1537
1538 if (error)
1539 goto end_function;
1540
1541 dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
1542 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
1543
1544 current_entry = 0;
1545 info_entry_ptr = NULL;
1546
1547 sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;
1548
1549 /* Loop till all the entries in in array are not processed */
1550 while (current_entry < sep_lli_entries) {
1551
1552 /* Set the new input and output tables */
1553 in_lli_table_ptr =
1554 (struct sep_lli_entry *)lli_table_alloc_addr;
1555
1556 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1557 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1558
1559 if (lli_table_alloc_addr >
1560 ((void *)sep->shared_addr +
1561 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1562 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1563
1564 error = -ENOMEM;
1565 goto end_function_error;
1566
1567 }
1568
1569 /* Update the number of created tables */
1570 sep->num_lli_tables_created++;
1571
1572 /* Calculate the maximum size of data for input table */
1573 table_data_size = sep_calculate_lli_table_max_size(sep,
1574 &lli_array_ptr[current_entry],
1575 (sep_lli_entries - current_entry),
1576 &last_table_flag);
1577
1578 /*
1579 * If this is not the last table -
1580 * then align it to the block size
1581 */
1582 if (!last_table_flag)
1583 table_data_size =
1584 (table_data_size / block_size) * block_size;
1585
1586 dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
1587 table_data_size);
1588
1589 /* Construct input lli table */
1590 sep_build_lli_table(sep, &lli_array_ptr[current_entry],
1591 in_lli_table_ptr,
1592 &current_entry, &num_entries_in_table, table_data_size);
1593
1594 if (info_entry_ptr == NULL) {
1595
1596 /* Set the output parameters to physical addresses */
1597 *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
1598 in_lli_table_ptr);
1599 *num_entries_ptr = num_entries_in_table;
1600 *table_data_size_ptr = table_data_size;
1601
1602 dev_dbg(&sep->pdev->dev,
1603 "output lli_table_in_ptr is %08lx\n",
1604 (unsigned long)*lli_table_ptr);
1605
1606 } else {
1607 /* Update the info entry of the previous in table */
1608 info_entry_ptr->bus_address =
1609 sep_shared_area_virt_to_bus(sep,
1610 in_lli_table_ptr);
1611 info_entry_ptr->block_size =
1612 ((num_entries_in_table) << 24) |
1613 (table_data_size);
1614 }
1615 /* Save the pointer to the info entry of the current tables */
1616 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1617 }
1618 /* Print input tables */
1619 sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
1620 sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
1621 *num_entries_ptr, *table_data_size_ptr);
1622 /* The array of the pages */
1623 kfree(lli_array_ptr);
1624
1625 update_dcb_counter:
1626 /* Update DCB counter */
1627 sep->nr_dcb_creat++;
1628 goto end_function;
1629
1630 end_function_error:
1631 /* Free all the allocated resources */
1632 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
1633 kfree(lli_array_ptr);
1634 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
1635
1636 end_function:
1637 return error;
1638
1639 }
1640 /**
1641 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
1642 * @sep: pointer to struct sep_device
1643 * @lli_in_array:
1644 * @sep_in_lli_entries:
1645 * @lli_out_array:
1646 * @sep_out_lli_entries
1647 * @block_size
1648 * @lli_table_in_ptr
1649 * @lli_table_out_ptr
1650 * @in_num_entries_ptr
1651 * @out_num_entries_ptr
1652 * @table_data_size_ptr
1653 *
1654 * This function creates the input and output DMA tables for
1655 * symmetric operations (AES/DES) according to the block
1656 * size from LLI arays
1657 * Note that all bus addresses that are passed to the SEP
1658 * are in 32 bit format; the SEP is a 32 bit device
1659 */
1660 static int sep_construct_dma_tables_from_lli(
1661 struct sep_device *sep,
1662 struct sep_lli_entry *lli_in_array,
1663 u32 sep_in_lli_entries,
1664 struct sep_lli_entry *lli_out_array,
1665 u32 sep_out_lli_entries,
1666 u32 block_size,
1667 dma_addr_t *lli_table_in_ptr,
1668 dma_addr_t *lli_table_out_ptr,
1669 u32 *in_num_entries_ptr,
1670 u32 *out_num_entries_ptr,
1671 u32 *table_data_size_ptr)
1672 {
1673 /* Points to the area where next lli table can be allocated */
1674 void *lli_table_alloc_addr = 0;
1675 /* Input lli table */
1676 struct sep_lli_entry *in_lli_table_ptr = NULL;
1677 /* Output lli table */
1678 struct sep_lli_entry *out_lli_table_ptr = NULL;
1679 /* Pointer to the info entry of the table - the last entry */
1680 struct sep_lli_entry *info_in_entry_ptr = NULL;
1681 /* Pointer to the info entry of the table - the last entry */
1682 struct sep_lli_entry *info_out_entry_ptr = NULL;
1683 /* Points to the first entry to be processed in the lli_in_array */
1684 u32 current_in_entry = 0;
1685 /* Points to the first entry to be processed in the lli_out_array */
1686 u32 current_out_entry = 0;
1687 /* Max size of the input table */
1688 u32 in_table_data_size = 0;
1689 /* Max size of the output table */
1690 u32 out_table_data_size = 0;
1691 /* Flag te signifies if this is the last tables build */
1692 u32 last_table_flag = 0;
1693 /* The data size that should be in table */
1694 u32 table_data_size = 0;
1695 /* Number of etnries in the input table */
1696 u32 num_entries_in_table = 0;
1697 /* Number of etnries in the output table */
1698 u32 num_entries_out_table = 0;
1699
1700 /* Initiate to point after the message area */
1701 lli_table_alloc_addr = (void *)(sep->shared_addr +
1702 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1703 (sep->num_lli_tables_created *
1704 (sizeof(struct sep_lli_entry) *
1705 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
1706
1707 /* Loop till all the entries in in array are not processed */
1708 while (current_in_entry < sep_in_lli_entries) {
1709 /* Set the new input and output tables */
1710 in_lli_table_ptr =
1711 (struct sep_lli_entry *)lli_table_alloc_addr;
1712
1713 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1714 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1715
1716 /* Set the first output tables */
1717 out_lli_table_ptr =
1718 (struct sep_lli_entry *)lli_table_alloc_addr;
1719
1720 /* Check if the DMA table area limit was overrun */
1721 if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
1722 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
1723 ((void *)sep->shared_addr +
1724 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1725 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1726
1727 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
1728 return -ENOMEM;
1729 }
1730
1731 /* Update the number of the lli tables created */
1732 sep->num_lli_tables_created += 2;
1733
1734 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1735 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1736
1737 /* Calculate the maximum size of data for input table */
1738 in_table_data_size =
1739 sep_calculate_lli_table_max_size(sep,
1740 &lli_in_array[current_in_entry],
1741 (sep_in_lli_entries - current_in_entry),
1742 &last_table_flag);
1743
1744 /* Calculate the maximum size of data for output table */
1745 out_table_data_size =
1746 sep_calculate_lli_table_max_size(sep,
1747 &lli_out_array[current_out_entry],
1748 (sep_out_lli_entries - current_out_entry),
1749 &last_table_flag);
1750
1751 dev_dbg(&sep->pdev->dev,
1752 "construct tables from lli in_table_data_size is %x\n",
1753 in_table_data_size);
1754
1755 dev_dbg(&sep->pdev->dev,
1756 "construct tables from lli out_table_data_size is %x\n",
1757 out_table_data_size);
1758
1759 table_data_size = in_table_data_size;
1760
1761 if (!last_table_flag) {
1762 /*
1763 * If this is not the last table,
1764 * then must check where the data is smallest
1765 * and then align it to the block size
1766 */
1767 if (table_data_size > out_table_data_size)
1768 table_data_size = out_table_data_size;
1769
1770 /*
1771 * Now calculate the table size so that
1772 * it will be module block size
1773 */
1774 table_data_size = (table_data_size / block_size) *
1775 block_size;
1776 }
1777
1778 /* Construct input lli table */
1779 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
1780 in_lli_table_ptr,
1781 &current_in_entry,
1782 &num_entries_in_table,
1783 table_data_size);
1784
1785 /* Construct output lli table */
1786 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
1787 out_lli_table_ptr,
1788 &current_out_entry,
1789 &num_entries_out_table,
1790 table_data_size);
1791
1792 /* If info entry is null - this is the first table built */
1793 if (info_in_entry_ptr == NULL) {
1794 /* Set the output parameters to physical addresses */
1795 *lli_table_in_ptr =
1796 sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1797
1798 *in_num_entries_ptr = num_entries_in_table;
1799
1800 *lli_table_out_ptr =
1801 sep_shared_area_virt_to_bus(sep,
1802 out_lli_table_ptr);
1803
1804 *out_num_entries_ptr = num_entries_out_table;
1805 *table_data_size_ptr = table_data_size;
1806
1807 dev_dbg(&sep->pdev->dev,
1808 "output lli_table_in_ptr is %08lx\n",
1809 (unsigned long)*lli_table_in_ptr);
1810 dev_dbg(&sep->pdev->dev,
1811 "output lli_table_out_ptr is %08lx\n",
1812 (unsigned long)*lli_table_out_ptr);
1813 } else {
1814 /* Update the info entry of the previous in table */
1815 info_in_entry_ptr->bus_address =
1816 sep_shared_area_virt_to_bus(sep,
1817 in_lli_table_ptr);
1818
1819 info_in_entry_ptr->block_size =
1820 ((num_entries_in_table) << 24) |
1821 (table_data_size);
1822
1823 /* Update the info entry of the previous in table */
1824 info_out_entry_ptr->bus_address =
1825 sep_shared_area_virt_to_bus(sep,
1826 out_lli_table_ptr);
1827
1828 info_out_entry_ptr->block_size =
1829 ((num_entries_out_table) << 24) |
1830 (table_data_size);
1831
1832 dev_dbg(&sep->pdev->dev,
1833 "output lli_table_in_ptr:%08lx %08x\n",
1834 (unsigned long)info_in_entry_ptr->bus_address,
1835 info_in_entry_ptr->block_size);
1836
1837 dev_dbg(&sep->pdev->dev,
1838 "output lli_table_out_ptr:%08lx %08x\n",
1839 (unsigned long)info_out_entry_ptr->bus_address,
1840 info_out_entry_ptr->block_size);
1841 }
1842
1843 /* Save the pointer to the info entry of the current tables */
1844 info_in_entry_ptr = in_lli_table_ptr +
1845 num_entries_in_table - 1;
1846 info_out_entry_ptr = out_lli_table_ptr +
1847 num_entries_out_table - 1;
1848
1849 dev_dbg(&sep->pdev->dev,
1850 "output num_entries_out_table is %x\n",
1851 (u32)num_entries_out_table);
1852 dev_dbg(&sep->pdev->dev,
1853 "output info_in_entry_ptr is %lx\n",
1854 (unsigned long)info_in_entry_ptr);
1855 dev_dbg(&sep->pdev->dev,
1856 "output info_out_entry_ptr is %lx\n",
1857 (unsigned long)info_out_entry_ptr);
1858 }
1859
1860 /* Print input tables */
1861 sep_debug_print_lli_tables(sep,
1862 (struct sep_lli_entry *)
1863 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
1864 *in_num_entries_ptr,
1865 *table_data_size_ptr);
1866
1867 /* Print output tables */
1868 sep_debug_print_lli_tables(sep,
1869 (struct sep_lli_entry *)
1870 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
1871 *out_num_entries_ptr,
1872 *table_data_size_ptr);
1873
1874 return 0;
1875 }
1876
1877 /**
1878 * sep_prepare_input_output_dma_table - prepare DMA I/O table
1879 * @app_virt_in_addr:
1880 * @app_virt_out_addr:
1881 * @data_size:
1882 * @block_size:
1883 * @lli_table_in_ptr:
1884 * @lli_table_out_ptr:
1885 * @in_num_entries_ptr:
1886 * @out_num_entries_ptr:
1887 * @table_data_size_ptr:
1888 * @is_kva: set for kernel data; used only for kernel crypto module
1889 *
1890 * This function builds input and output DMA tables for synhronic
1891 * symmetric operations (AES, DES, HASH). It also checks that each table
1892 * is of the modular block size
1893 * Note that all bus addresses that are passed to the SEP
1894 * are in 32 bit format; the SEP is a 32 bit device
1895 */
1896 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1897 unsigned long app_virt_in_addr,
1898 unsigned long app_virt_out_addr,
1899 u32 data_size,
1900 u32 block_size,
1901 dma_addr_t *lli_table_in_ptr,
1902 dma_addr_t *lli_table_out_ptr,
1903 u32 *in_num_entries_ptr,
1904 u32 *out_num_entries_ptr,
1905 u32 *table_data_size_ptr,
1906 bool is_kva)
1907
1908 {
1909 int error = 0;
1910 /* Array of pointers of page */
1911 struct sep_lli_entry *lli_in_array;
1912 /* Array of pointers of page */
1913 struct sep_lli_entry *lli_out_array;
1914
1915 if (data_size == 0) {
1916 /* Prepare empty table for input and output */
1917 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
1918 in_num_entries_ptr, table_data_size_ptr);
1919
1920 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
1921 out_num_entries_ptr, table_data_size_ptr);
1922
1923 goto update_dcb_counter;
1924 }
1925
1926 /* Initialize the pages pointers */
1927 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
1928 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
1929
1930 /* Lock the pages of the buffer and translate them to pages */
1931 if (is_kva == true) {
1932 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
1933 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
1934
1935 if (error) {
1936 dev_warn(&sep->pdev->dev,
1937 "lock kernel for in failed\n");
1938 goto end_function;
1939 }
1940
1941 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
1942 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
1943
1944 if (error) {
1945 dev_warn(&sep->pdev->dev,
1946 "lock kernel for out failed\n");
1947 goto end_function;
1948 }
1949 }
1950
1951 else {
1952 error = sep_lock_user_pages(sep, app_virt_in_addr,
1953 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
1954 if (error) {
1955 dev_warn(&sep->pdev->dev,
1956 "sep_lock_user_pages for input virtual buffer failed\n");
1957 goto end_function;
1958 }
1959
1960 error = sep_lock_user_pages(sep, app_virt_out_addr,
1961 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
1962
1963 if (error) {
1964 dev_warn(&sep->pdev->dev,
1965 "sep_lock_user_pages for output virtual buffer failed\n");
1966 goto end_function_free_lli_in;
1967 }
1968 }
1969
1970 dev_dbg(&sep->pdev->dev, "prep input output dma table sep_in_num_pages is %x\n",
1971 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
1972 dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
1973 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
1974 dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
1975 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1976
1977 /* Call the function that creates table from the lli arrays */
1978 error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
1979 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
1980 lli_out_array,
1981 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
1982 block_size, lli_table_in_ptr, lli_table_out_ptr,
1983 in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1984
1985 if (error) {
1986 dev_warn(&sep->pdev->dev,
1987 "sep_construct_dma_tables_from_lli failed\n");
1988 goto end_function_with_error;
1989 }
1990
1991 kfree(lli_out_array);
1992 kfree(lli_in_array);
1993
1994 update_dcb_counter:
1995 /* Update DCB counter */
1996 sep->nr_dcb_creat++;
1997
1998 goto end_function;
1999
2000 end_function_with_error:
2001 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
2002 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
2003 kfree(lli_out_array);
2004
2005
2006 end_function_free_lli_in:
2007 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
2008 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
2009 kfree(lli_in_array);
2010
2011 end_function:
2012
2013 return error;
2014
2015 }
2016
2017 /**
2018 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2019 * @app_in_address: unsigned long; for data buffer in (user space)
2020 * @app_out_address: unsigned long; for data buffer out (user space)
2021 * @data_in_size: u32; for size of data
2022 * @block_size: u32; for block size
2023 * @tail_block_size: u32; for size of tail block
2024 * @isapplet: bool; to indicate external app
2025 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2026 *
2027 * This function prepares the linked DMA tables and puts the
2028 * address for the linked list of tables inta a DCB (data control
2029 * block) the address of which is known by the SEP hardware
2030 * Note that all bus addresses that are passed to the SEP
2031 * are in 32 bit format; the SEP is a 32 bit device
2032 */
2033 static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2034 unsigned long app_in_address,
2035 unsigned long app_out_address,
2036 u32 data_in_size,
2037 u32 block_size,
2038 u32 tail_block_size,
2039 bool isapplet,
2040 bool is_kva)
2041 {
2042 int error = 0;
2043 /* Size of tail */
2044 u32 tail_size = 0;
2045 /* Address of the created DCB table */
2046 struct sep_dcblock *dcb_table_ptr = NULL;
2047 /* The physical address of the first input DMA table */
2048 dma_addr_t in_first_mlli_address = 0;
2049 /* Number of entries in the first input DMA table */
2050 u32 in_first_num_entries = 0;
2051 /* The physical address of the first output DMA table */
2052 dma_addr_t out_first_mlli_address = 0;
2053 /* Number of entries in the first output DMA table */
2054 u32 out_first_num_entries = 0;
2055 /* Data in the first input/output table */
2056 u32 first_data_size = 0;
2057
2058 if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2059 /* No more DCBs to allocate */
2060 dev_warn(&sep->pdev->dev, "no more DCBs available\n");
2061 error = -ENOSPC;
2062 goto end_function;
2063 }
2064
2065 /* Allocate new DCB */
2066 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2067 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2068 (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
2069
2070 /* Set the default values in the DCB */
2071 dcb_table_ptr->input_mlli_address = 0;
2072 dcb_table_ptr->input_mlli_num_entries = 0;
2073 dcb_table_ptr->input_mlli_data_size = 0;
2074 dcb_table_ptr->output_mlli_address = 0;
2075 dcb_table_ptr->output_mlli_num_entries = 0;
2076 dcb_table_ptr->output_mlli_data_size = 0;
2077 dcb_table_ptr->tail_data_size = 0;
2078 dcb_table_ptr->out_vr_tail_pt = 0;
2079
2080 if (isapplet == true) {
2081
2082 /* Check if there is enough data for DMA operation */
2083 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2084 if (is_kva == true) {
2085 memcpy(dcb_table_ptr->tail_data,
2086 (void *)app_in_address, data_in_size);
2087 } else {
2088 if (copy_from_user(dcb_table_ptr->tail_data,
2089 (void __user *)app_in_address,
2090 data_in_size)) {
2091 error = -EFAULT;
2092 goto end_function;
2093 }
2094 }
2095
2096 dcb_table_ptr->tail_data_size = data_in_size;
2097
2098 /* Set the output user-space address for mem2mem op */
2099 if (app_out_address)
2100 dcb_table_ptr->out_vr_tail_pt =
2101 (aligned_u64)app_out_address;
2102
2103 /*
2104 * Update both data length parameters in order to avoid
2105 * second data copy and allow building of empty mlli
2106 * tables
2107 */
2108 tail_size = 0x0;
2109 data_in_size = 0x0;
2110
2111 } else {
2112 if (!app_out_address) {
2113 tail_size = data_in_size % block_size;
2114 if (!tail_size) {
2115 if (tail_block_size == block_size)
2116 tail_size = block_size;
2117 }
2118 } else {
2119 tail_size = 0;
2120 }
2121 }
2122 if (tail_size) {
2123 if (is_kva == true) {
2124 memcpy(dcb_table_ptr->tail_data,
2125 (void *)(app_in_address + data_in_size -
2126 tail_size), tail_size);
2127 } else {
2128 /* We have tail data - copy it to DCB */
2129 if (copy_from_user(dcb_table_ptr->tail_data,
2130 (void *)(app_in_address +
2131 data_in_size - tail_size), tail_size)) {
2132 error = -EFAULT;
2133 goto end_function;
2134 }
2135 }
2136 if (app_out_address)
2137 /*
2138 * Calculate the output address
2139 * according to tail data size
2140 */
2141 dcb_table_ptr->out_vr_tail_pt =
2142 (aligned_u64)app_out_address + data_in_size
2143 - tail_size;
2144
2145 /* Save the real tail data size */
2146 dcb_table_ptr->tail_data_size = tail_size;
2147 /*
2148 * Update the data size without the tail
2149 * data size AKA data for the dma
2150 */
2151 data_in_size = (data_in_size - tail_size);
2152 }
2153 }
2154 /* Check if we need to build only input table or input/output */
2155 if (app_out_address) {
2156 /* Prepare input/output tables */
2157 error = sep_prepare_input_output_dma_table(sep,
2158 app_in_address,
2159 app_out_address,
2160 data_in_size,
2161 block_size,
2162 &in_first_mlli_address,
2163 &out_first_mlli_address,
2164 &in_first_num_entries,
2165 &out_first_num_entries,
2166 &first_data_size,
2167 is_kva);
2168 } else {
2169 /* Prepare input tables */
2170 error = sep_prepare_input_dma_table(sep,
2171 app_in_address,
2172 data_in_size,
2173 block_size,
2174 &in_first_mlli_address,
2175 &in_first_num_entries,
2176 &first_data_size,
2177 is_kva);
2178 }
2179
2180 if (error) {
2181 dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
2182 goto end_function;
2183 }
2184
2185 /* Set the DCB values */
2186 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2187 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2188 dcb_table_ptr->input_mlli_data_size = first_data_size;
2189 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2190 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2191 dcb_table_ptr->output_mlli_data_size = first_data_size;
2192
2193 end_function:
2194 return error;
2195
2196 }
2197
2198 /**
2199 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2200 * @sep: pointer to struct sep_device
2201 * @isapplet: indicates external application (used for kernel access)
2202 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2203 *
2204 * This function frees the DMA tables and DCB
2205 */
2206 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2207 bool is_kva)
2208 {
2209 int i = 0;
2210 int error = 0;
2211 int error_temp = 0;
2212 struct sep_dcblock *dcb_table_ptr;
2213 unsigned long pt_hold;
2214 void *tail_pt;
2215
2216 if (isapplet == true) {
2217 /* Set pointer to first DCB table */
2218 dcb_table_ptr = (struct sep_dcblock *)
2219 (sep->shared_addr +
2220 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2221
2222 /* Go over each DCB and see if tail pointer must be updated */
2223 for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
2224 if (dcb_table_ptr->out_vr_tail_pt) {
2225 pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt;
2226 tail_pt = (void *)pt_hold;
2227 if (is_kva == true) {
2228 memcpy(tail_pt,
2229 dcb_table_ptr->tail_data,
2230 dcb_table_ptr->tail_data_size);
2231 } else {
2232 error_temp = copy_to_user(
2233 tail_pt,
2234 dcb_table_ptr->tail_data,
2235 dcb_table_ptr->tail_data_size);
2236 }
2237 if (error_temp) {
2238 /* Release the DMA resource */
2239 error = -EFAULT;
2240 break;
2241 }
2242 }
2243 }
2244 }
2245 /* Free the output pages, if any */
2246 sep_free_dma_table_data_handler(sep);
2247
2248 return error;
2249 }
2250
2251 /**
2252 * sep_get_static_pool_addr_handler - get static pool address
2253 * @sep: pointer to struct sep_device
2254 *
2255 * This function sets the bus and virtual addresses of the static pool
2256 */
2257 static int sep_get_static_pool_addr_handler(struct sep_device *sep)
2258 {
2259 u32 *static_pool_addr = NULL;
2260
2261 static_pool_addr = (u32 *)(sep->shared_addr +
2262 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2263
2264 static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
2265 static_pool_addr[1] = (u32)sep->shared_bus +
2266 SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2267
2268 dev_dbg(&sep->pdev->dev, "static pool segment: physical %x\n",
2269 (u32)static_pool_addr[1]);
2270
2271 return 0;
2272 }
2273
2274 /**
2275 * sep_end_transaction_handler - end transaction
2276 * @sep: pointer to struct sep_device
2277 *
2278 * This API handles the end transaction request
2279 */
2280 static int sep_end_transaction_handler(struct sep_device *sep)
2281 {
2282 /* Clear the data pool pointers Token */
2283 memset((void *)(sep->shared_addr +
2284 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
2285 0, sep->num_of_data_allocations*2*sizeof(u32));
2286
2287 /* Check that all the DMA resources were freed */
2288 sep_free_dma_table_data_handler(sep);
2289
2290 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
2291
2292 /*
2293 * We are now through with the transaction. Let's
2294 * allow other processes who have the device open
2295 * to perform transactions
2296 */
2297 mutex_lock(&sep->sep_mutex);
2298 sep->pid_doing_transaction = 0;
2299 mutex_unlock(&sep->sep_mutex);
2300 /* Raise event for stuck contextes */
2301 wake_up(&sep->event);
2302
2303 return 0;
2304 }
2305
2306 /**
2307 * sep_prepare_dcb_handler - prepare a control block
2308 * @sep: pointer to struct sep_device
2309 * @arg: pointer to user parameters
2310 *
2311 * This function will retrieve the RAR buffer physical addresses, type
2312 * & size corresponding to the RAR handles provided in the buffers vector.
2313 */
2314 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
2315 {
2316 int error;
2317 /* Command arguments */
2318 struct build_dcb_struct command_args;
2319
2320 /* Get the command arguments */
2321 if (copy_from_user(&command_args, (void __user *)arg,
2322 sizeof(struct build_dcb_struct))) {
2323 error = -EFAULT;
2324 goto end_function;
2325 }
2326
2327 dev_dbg(&sep->pdev->dev, "prep dcb handler app_in_address is %08llx\n",
2328 command_args.app_in_address);
2329 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2330 command_args.app_out_address);
2331 dev_dbg(&sep->pdev->dev, "data_size is %x\n",
2332 command_args.data_in_size);
2333 dev_dbg(&sep->pdev->dev, "block_size is %x\n",
2334 command_args.block_size);
2335 dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
2336 command_args.tail_block_size);
2337
2338 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2339 (unsigned long)command_args.app_in_address,
2340 (unsigned long)command_args.app_out_address,
2341 command_args.data_in_size, command_args.block_size,
2342 command_args.tail_block_size, true, false);
2343
2344 end_function:
2345 return error;
2346
2347 }
2348
2349 /**
2350 * sep_free_dcb_handler - free control block resources
2351 * @sep: pointer to struct sep_device
2352 *
2353 * This function frees the DCB resources and updates the needed
2354 * user-space buffers.
2355 */
2356 static int sep_free_dcb_handler(struct sep_device *sep)
2357 {
2358 return sep_free_dma_tables_and_dcb(sep, false, false);
2359 }
2360
2361 /**
2362 * sep_rar_prepare_output_msg_handler - prepare an output message
2363 * @sep: pointer to struct sep_device
2364 * @arg: pointer to user parameters
2365 *
2366 * This function will retrieve the RAR buffer physical addresses, type
2367 * & size corresponding to the RAR handles provided in the buffers vector.
2368 */
2369 static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
2370 unsigned long arg)
2371 {
2372 int error = 0;
2373 /* Command args */
2374 struct rar_hndl_to_bus_struct command_args;
2375 struct RAR_buffer rar_buf;
2376 /* Bus address */
2377 dma_addr_t rar_bus = 0;
2378 /* Holds the RAR address in the system memory offset */
2379 u32 *rar_addr;
2380
2381 /* Copy the data */
2382 if (copy_from_user(&command_args, (void __user *)arg,
2383 sizeof(command_args))) {
2384 error = -EFAULT;
2385 goto end_function;
2386 }
2387
2388 /* Call to translation function only if user handle is not NULL */
2389 if (command_args.rar_handle) {
2390 memset(&rar_buf, 0, sizeof(rar_buf));
2391 rar_buf.info.handle = (u32)command_args.rar_handle;
2392
2393 if (rar_handle_to_bus(&rar_buf, 1) != 1) {
2394 error = -EFAULT;
2395 goto end_function;
2396 }
2397 rar_bus = rar_buf.bus_address;
2398 }
2399 dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
2400
2401 /* Set value in the SYSTEM MEMORY offset */
2402 rar_addr = (u32 *)(sep->shared_addr +
2403 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2404
2405 /* Copy the physical address to the System Area for the SEP */
2406 rar_addr[0] = SEP_RAR_VAL_TOKEN;
2407 rar_addr[1] = rar_bus;
2408
2409 end_function:
2410 return error;
2411 }
2412
2413 /**
2414 * sep_ioctl - ioctl api
2415 * @filp: pointer to struct file
2416 * @cmd: command
2417 * @arg: pointer to argument structure
2418 *
2419 * Implement the ioctl methods available on the SEP device.
2420 */
2421 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2422 {
2423 int error = 0;
2424 struct sep_device *sep = filp->private_data;
2425
2426 /* Make sure we own this device */
2427 mutex_lock(&sep->sep_mutex);
2428 if ((current->pid != sep->pid_doing_transaction) &&
2429 (sep->pid_doing_transaction != 0)) {
2430 dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
2431 error = -EACCES;
2432 goto end_function;
2433 }
2434
2435 mutex_unlock(&sep->sep_mutex);
2436
2437 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2438 return -ENOTTY;
2439
2440 /* Lock to prevent the daemon to interfere with operation */
2441 mutex_lock(&sep->ioctl_mutex);
2442
2443 switch (cmd) {
2444 case SEP_IOCSENDSEPCOMMAND:
2445 /* Send command to SEP */
2446 error = sep_send_command_handler(sep);
2447 break;
2448 case SEP_IOCALLOCDATAPOLL:
2449 /* Allocate data pool */
2450 error = sep_allocate_data_pool_memory_handler(sep, arg);
2451 break;
2452 case SEP_IOCGETSTATICPOOLADDR:
2453 /* Inform the SEP the bus address of the static pool */
2454 error = sep_get_static_pool_addr_handler(sep);
2455 break;
2456 case SEP_IOCENDTRANSACTION:
2457 error = sep_end_transaction_handler(sep);
2458 break;
2459 case SEP_IOCRARPREPAREMESSAGE:
2460 error = sep_rar_prepare_output_msg_handler(sep, arg);
2461 break;
2462 case SEP_IOCPREPAREDCB:
2463 error = sep_prepare_dcb_handler(sep, arg);
2464 break;
2465 case SEP_IOCFREEDCB:
2466 error = sep_free_dcb_handler(sep);
2467 break;
2468 default:
2469 error = -ENOTTY;
2470 break;
2471 }
2472
2473 end_function:
2474 mutex_unlock(&sep->ioctl_mutex);
2475 return error;
2476 }
2477
2478 /**
2479 * sep_singleton_ioctl - ioctl api for singleton interface
2480 * @filp: pointer to struct file
2481 * @cmd: command
2482 * @arg: pointer to argument structure
2483 *
2484 * Implement the additional ioctls for the singleton device
2485 */
2486 static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
2487 {
2488 long error = 0;
2489 struct sep_device *sep = filp->private_data;
2490
2491 /* Check that the command is for the SEP device */
2492 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2493 return -ENOTTY;
2494
2495 /* Make sure we own this device */
2496 mutex_lock(&sep->sep_mutex);
2497 if ((current->pid != sep->pid_doing_transaction) &&
2498 (sep->pid_doing_transaction != 0)) {
2499 dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
2500 mutex_unlock(&sep->sep_mutex);
2501 return -EACCES;
2502 }
2503
2504 mutex_unlock(&sep->sep_mutex);
2505
2506 switch (cmd) {
2507 case SEP_IOCTLSETCALLERID:
2508 mutex_lock(&sep->ioctl_mutex);
2509 error = sep_set_caller_id_handler(sep, arg);
2510 mutex_unlock(&sep->ioctl_mutex);
2511 break;
2512 default:
2513 error = sep_ioctl(filp, cmd, arg);
2514 break;
2515 }
2516 return error;
2517 }
2518
2519 /**
2520 * sep_request_daemon_ioctl - ioctl for daemon
2521 * @filp: pointer to struct file
2522 * @cmd: command
2523 * @arg: pointer to argument structure
2524 *
2525 * Called by the request daemon to perform ioctls on the daemon device
2526 */
2527 static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
2528 unsigned long arg)
2529 {
2530
2531 long error;
2532 struct sep_device *sep = filp->private_data;
2533
2534 /* Check that the command is for SEP device */
2535 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2536 return -ENOTTY;
2537
2538 /* Only one process can access ioctl at any given time */
2539 mutex_lock(&sep->ioctl_mutex);
2540
2541 switch (cmd) {
2542 case SEP_IOCSENDSEPRPLYCOMMAND:
2543 /* Send reply command to SEP */
2544 error = sep_req_daemon_send_reply_command_handler(sep);
2545 break;
2546 case SEP_IOCENDTRANSACTION:
2547 /*
2548 * End req daemon transaction, do nothing
2549 * will be removed upon update in middleware
2550 * API library
2551 */
2552 error = 0;
2553 break;
2554 default:
2555 error = -ENOTTY;
2556 }
2557 mutex_unlock(&sep->ioctl_mutex);
2558 return error;
2559 }
2560
2561 /**
2562 * sep_inthandler - interrupt handler
2563 * @irq: interrupt
2564 * @dev_id: device id
2565 */
2566 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2567 {
2568 irqreturn_t int_error = IRQ_HANDLED;
2569 unsigned long lck_flags;
2570 u32 reg_val, reg_val2 = 0;
2571 struct sep_device *sep = dev_id;
2572
2573 /* Read the IRR register to check if this is SEP interrupt */
2574 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2575
2576 if (reg_val & (0x1 << 13)) {
2577 /* Lock and update the counter of reply messages */
2578 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
2579 sep->reply_ct++;
2580 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
2581
2582 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
2583 sep->send_ct, sep->reply_ct);
2584
2585 /* Is this printf or daemon request? */
2586 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2587 dev_dbg(&sep->pdev->dev,
2588 "SEP Interrupt - reg2 is %08x\n", reg_val2);
2589
2590 if ((reg_val2 >> 30) & 0x1) {
2591 dev_dbg(&sep->pdev->dev, "int: printf request\n");
2592 wake_up(&sep->event_request_daemon);
2593 } else if (reg_val2 >> 31) {
2594 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
2595 wake_up(&sep->event_request_daemon);
2596 } else {
2597 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
2598 wake_up(&sep->event);
2599 }
2600 } else {
2601 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
2602 int_error = IRQ_NONE;
2603 }
2604 if (int_error == IRQ_HANDLED)
2605 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
2606
2607 return int_error;
2608 }
2609
2610 /**
2611 * sep_reconfig_shared_area - reconfigure shared area
2612 * @sep: pointer to struct sep_device
2613 *
2614 * Reconfig the shared area between HOST and SEP - needed in case
2615 * the DX_CC_Init function was called before OS loading.
2616 */
2617 static int sep_reconfig_shared_area(struct sep_device *sep)
2618 {
2619 int ret_val;
2620
2621 /* use to limit waiting for SEP */
2622 unsigned long end_time;
2623
2624 /* Send the new SHARED MESSAGE AREA to the SEP */
2625 dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
2626 (unsigned long long)sep->shared_bus);
2627
2628 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2629
2630 /* Poll for SEP response */
2631 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2632
2633 end_time = jiffies + (WAIT_TIME * HZ);
2634
2635 while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
2636 (ret_val != sep->shared_bus))
2637 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2638
2639 /* Check the return value (register) */
2640 if (ret_val != sep->shared_bus) {
2641 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
2642 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
2643 ret_val = -ENOMEM;
2644 } else
2645 ret_val = 0;
2646
2647 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
2648 return ret_val;
2649 }
2650
2651 /* File operation for singleton SEP operations */
2652 static const struct file_operations singleton_file_operations = {
2653 .owner = THIS_MODULE,
2654 .unlocked_ioctl = sep_singleton_ioctl,
2655 .poll = sep_poll,
2656 .open = sep_singleton_open,
2657 .release = sep_singleton_release,
2658 .mmap = sep_mmap,
2659 };
2660
2661 /* File operation for daemon operations */
2662 static const struct file_operations daemon_file_operations = {
2663 .owner = THIS_MODULE,
2664 .unlocked_ioctl = sep_request_daemon_ioctl,
2665 .poll = sep_request_daemon_poll,
2666 .open = sep_request_daemon_open,
2667 .release = sep_request_daemon_release,
2668 .mmap = sep_request_daemon_mmap,
2669 };
2670
2671 /* The files operations structure of the driver */
2672 static const struct file_operations sep_file_operations = {
2673 .owner = THIS_MODULE,
2674 .unlocked_ioctl = sep_ioctl,
2675 .poll = sep_poll,
2676 .open = sep_open,
2677 .release = sep_release,
2678 .mmap = sep_mmap,
2679 };
2680
2681 /**
2682 * sep_register_driver_with_fs - register misc devices
2683 * @sep: pointer to struct sep_device
2684 *
2685 * This function registers the driver with the file system
2686 */
2687 static int sep_register_driver_with_fs(struct sep_device *sep)
2688 {
2689 int ret_val;
2690
2691 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
2692 sep->miscdev_sep.name = SEP_DEV_NAME;
2693 sep->miscdev_sep.fops = &sep_file_operations;
2694
2695 sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
2696 sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
2697 sep->miscdev_singleton.fops = &singleton_file_operations;
2698
2699 sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
2700 sep->miscdev_daemon.name = SEP_DEV_DAEMON;
2701 sep->miscdev_daemon.fops = &daemon_file_operations;
2702
2703 ret_val = misc_register(&sep->miscdev_sep);
2704 if (ret_val) {
2705 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
2706 ret_val);
2707 return ret_val;
2708 }
2709
2710 ret_val = misc_register(&sep->miscdev_singleton);
2711 if (ret_val) {
2712 dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
2713 ret_val);
2714 misc_deregister(&sep->miscdev_sep);
2715 return ret_val;
2716 }
2717
2718 ret_val = misc_register(&sep->miscdev_daemon);
2719 if (ret_val) {
2720 dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
2721 ret_val);
2722 misc_deregister(&sep->miscdev_sep);
2723 misc_deregister(&sep->miscdev_singleton);
2724
2725 return ret_val;
2726 }
2727 return ret_val;
2728 }
2729
2730
2731 /**
2732 * sep_probe - probe a matching PCI device
2733 * @pdev: pci_device
2734 * @end: pci_device_id
2735 *
2736 * Attempt to set up and configure a SEP device that has been
2737 * discovered by the PCI layer.
2738 */
2739 static int __devinit sep_probe(struct pci_dev *pdev,
2740 const struct pci_device_id *ent)
2741 {
2742 int error = 0;
2743 struct sep_device *sep;
2744
2745 if (sep_dev != NULL) {
2746 dev_warn(&pdev->dev, "only one SEP supported.\n");
2747 return -EBUSY;
2748 }
2749
2750 /* Enable the device */
2751 error = pci_enable_device(pdev);
2752 if (error) {
2753 dev_warn(&pdev->dev, "error enabling pci device\n");
2754 goto end_function;
2755 }
2756
2757 /* Allocate the sep_device structure for this device */
2758 sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
2759 if (sep_dev == NULL) {
2760 dev_warn(&pdev->dev,
2761 "can't kmalloc the sep_device structure\n");
2762 error = -ENOMEM;
2763 goto end_function_disable_device;
2764 }
2765
2766 /*
2767 * We're going to use another variable for actually
2768 * working with the device; this way, if we have
2769 * multiple devices in the future, it would be easier
2770 * to make appropriate changes
2771 */
2772 sep = sep_dev;
2773
2774 sep->pdev = pci_dev_get(pdev);
2775
2776 init_waitqueue_head(&sep->event);
2777 init_waitqueue_head(&sep->event_request_daemon);
2778 spin_lock_init(&sep->snd_rply_lck);
2779 mutex_init(&sep->sep_mutex);
2780 mutex_init(&sep->ioctl_mutex);
2781
2782 dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, device being prepared\n");
2783 dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
2784
2785 /* Set up our register area */
2786 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
2787 if (!sep->reg_physical_addr) {
2788 dev_warn(&sep->pdev->dev, "Error getting register start\n");
2789 error = -ENODEV;
2790 goto end_function_free_sep_dev;
2791 }
2792
2793 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
2794 if (!sep->reg_physical_end) {
2795 dev_warn(&sep->pdev->dev, "Error getting register end\n");
2796 error = -ENODEV;
2797 goto end_function_free_sep_dev;
2798 }
2799
2800 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
2801 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
2802 if (!sep->reg_addr) {
2803 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
2804 error = -ENODEV;
2805 goto end_function_free_sep_dev;
2806 }
2807
2808 dev_dbg(&sep->pdev->dev,
2809 "Register area start %llx end %llx virtual %p\n",
2810 (unsigned long long)sep->reg_physical_addr,
2811 (unsigned long long)sep->reg_physical_end,
2812 sep->reg_addr);
2813
2814 /* Allocate the shared area */
2815 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2816 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
2817 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
2818 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
2819 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2820
2821 if (sep_map_and_alloc_shared_area(sep)) {
2822 error = -ENOMEM;
2823 /* Allocation failed */
2824 goto end_function_error;
2825 }
2826
2827 /* Clear ICR register */
2828 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2829
2830 /* Set the IMR register - open only GPR 2 */
2831 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2832
2833 /* Read send/receive counters from SEP */
2834 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2835 sep->reply_ct &= 0x3FFFFFFF;
2836 sep->send_ct = sep->reply_ct;
2837
2838 /* Get the interrupt line */
2839 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
2840 "sep_driver", sep);
2841
2842 if (error)
2843 goto end_function_deallocate_sep_shared_area;
2844
2845 /* The new chip requires a shared area reconfigure */
2846 if (sep->pdev->revision == 4) { /* Only for new chip */
2847 error = sep_reconfig_shared_area(sep);
2848 if (error)
2849 goto end_function_free_irq;
2850 }
2851 /* Finally magic up the device nodes */
2852 /* Register driver with the fs */
2853 error = sep_register_driver_with_fs(sep);
2854 if (error == 0)
2855 /* Success */
2856 return 0;
2857
2858 end_function_free_irq:
2859 free_irq(pdev->irq, sep);
2860
2861 end_function_deallocate_sep_shared_area:
2862 /* De-allocate shared area */
2863 sep_unmap_and_free_shared_area(sep);
2864
2865 end_function_error:
2866 iounmap(sep->reg_addr);
2867
2868 end_function_free_sep_dev:
2869 pci_dev_put(sep_dev->pdev);
2870 kfree(sep_dev);
2871 sep_dev = NULL;
2872
2873 end_function_disable_device:
2874 pci_disable_device(pdev);
2875
2876 end_function:
2877 return error;
2878 }
2879
2880 static void sep_remove(struct pci_dev *pdev)
2881 {
2882 struct sep_device *sep = sep_dev;
2883
2884 /* Unregister from fs */
2885 misc_deregister(&sep->miscdev_sep);
2886 misc_deregister(&sep->miscdev_singleton);
2887 misc_deregister(&sep->miscdev_daemon);
2888
2889 /* Free the irq */
2890 free_irq(sep->pdev->irq, sep);
2891
2892 /* Free the shared area */
2893 sep_unmap_and_free_shared_area(sep_dev);
2894 iounmap((void *) sep_dev->reg_addr);
2895 }
2896
2897 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
2898 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
2899 {0}
2900 };
2901
2902 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2903
2904 /* Field for registering driver to PCI device */
2905 static struct pci_driver sep_pci_driver = {
2906 .name = "sep_sec_driver",
2907 .id_table = sep_pci_id_tbl,
2908 .probe = sep_probe,
2909 .remove = sep_remove
2910 };
2911
2912
2913 /**
2914 * sep_init - init function
2915 *
2916 * Module load time. Register the PCI device driver.
2917 */
2918 static int __init sep_init(void)
2919 {
2920 return pci_register_driver(&sep_pci_driver);
2921 }
2922
2923
2924 /**
2925 * sep_exit - called to unload driver
2926 *
2927 * Drop the misc devices then remove and unmap the various resources
2928 * that are not released by the driver remove method.
2929 */
2930 static void __exit sep_exit(void)
2931 {
2932 pci_unregister_driver(&sep_pci_driver);
2933 }
2934
2935
2936 module_init(sep_init);
2937 module_exit(sep_exit);
2938
2939 MODULE_LICENSE("GPL");
This page took 0.108859 seconds and 5 git commands to generate.