/*
 *
 *  sep_main.c - Security Processor Driver main group of functions
 *
 *  Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
 *  Contributions(c) 2009-2011 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *  CONTACTS:
 *
 *  Mark Allyn		mark.a.allyn@intel.com
 *  Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 *  CHANGES:
 *
 *  2009.06.26	Initial publish
 *  2010.09.14	Upgrade to Medfield
 *  2011.01.21	Move to sep_main.c to allow for sep_crypto.c
 *  2011.02.22	Enable kernel crypto operation
 *
 *  Please note that this driver is based on information in the Discretix
 *  CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
 *  Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
 *  Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
 *  Overview and Integration Guide.
 */

/* #define SEP_PERF_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hash.h>

#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
#include "sep_crypto.h"

#define CREATE_TRACE_POINTS
#include "sep_trace_events.h"

/*
 * Let's not spend cycles iterating over message
 * area contents if debugging not enabled
 */
#ifdef DEBUG
#define sep_dump_message(sep)	_sep_dump_message(sep)
#else
#define sep_dump_message(sep)
#endif

/**
 * Currently, there is only one SEP device per platform;
 * In event platforms in the future have more than one SEP
 * device, this will be a linked list
 */

struct sep_device *sep_dev;

/**
 * sep_queue_status_remove - Removes transaction from status queue
 * @sep: SEP device
 * @queue_elem: pointer to the status queue element to remove
 *
 * This function will remove information about transaction from the queue.
 */
void sep_queue_status_remove(struct sep_device *sep,
				      struct sep_queue_info **queue_elem)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
		current->pid);

	if (!queue_elem || !(*queue_elem)) {
		dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
					current->pid, __func__);
		return;
	}

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
	list_del(&(*queue_elem)->list);
	sep->sep_queue_num--;
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	kfree(*queue_elem);
	*queue_elem = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
		current->pid);
	return;
}

/**
 * sep_queue_status_add - Adds transaction to status queue
 * @sep: SEP device
 * @opcode: transaction opcode
 * @size: input data size
 * @pid: pid of current process
 * @name: current process name
 * @name_len: length of name (current process)
 *
 * This function adds information about a started transaction to the status
 * queue.
 */
struct sep_queue_info *sep_queue_status_add(
						struct sep_device *sep,
						u32 opcode,
						u32 size,
						u32 pid,
						u8 *name, size_t name_len)
{
	unsigned long lck_flags;
	struct sep_queue_info *my_elem = NULL;

	my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);

	if (!my_elem)
		return NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);

	my_elem->data.opcode = opcode;
	my_elem->data.size = size;
	my_elem->data.pid = pid;

	if (name_len > TASK_COMM_LEN)
		name_len = TASK_COMM_LEN;

	memcpy(&my_elem->data.name, name, name_len);

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

	list_add_tail(&my_elem->list, &sep->sep_queue_status);
	sep->sep_queue_num++;

	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	return my_elem;
}
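
/*
 * Illustrative sketch (compiled out; not part of the driver): how a
 * transaction handler would typically pair sep_queue_status_add() with
 * sep_queue_status_remove(). The opcode and size values here are
 * hypothetical.
 */
#if 0
static int sep_example_status_queue(struct sep_device *sep)
{
	struct sep_queue_info *queue_elem;

	/* Record the transaction in the status queue */
	queue_elem = sep_queue_status_add(sep, 0x1 /* opcode */,
					  256 /* size */, current->pid,
					  (u8 *)current->comm,
					  strlen(current->comm));
	if (!queue_elem)
		return -ENOMEM;

	/* ... run the transaction against the SEP here ... */

	/* Unlink and free the entry; queue_elem is NULLed by the callee */
	sep_queue_status_remove(sep, &queue_elem);
	return 0;
}
#endif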

/**
 *	sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
 *	@sep: SEP device
 *	@dmatables_region: Destination pointer for the buffer
 *	@dma_ctx: DMA context for the transaction
 *	@table_count: Number of MLLI/DMA tables to create
 *	The buffer created will not work as-is for DMA operations,
 *	it needs to be copied over to the appropriate place in the
 *	shared area.
 */
static int sep_allocate_dmatables_region(struct sep_device *sep,
					 void **dmatables_region,
					 struct sep_dma_context *dma_ctx,
					 const u32 table_count)
{
	const size_t new_len =
		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

	void *tmp_region = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
				current->pid, dma_ctx);
	dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
				current->pid, dmatables_region);

	if (!dma_ctx || !dmatables_region) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] dma context/region uninitialized\n",
			current->pid);
		return -EINVAL;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
				current->pid, new_len);
	dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
				dma_ctx->dmatables_len);
	tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
	if (!tmp_region)
		return -ENOMEM;

	/* Were there any previous tables that need to be preserved ? */
	if (*dmatables_region) {
		memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
		kfree(*dmatables_region);
		*dmatables_region = NULL;
	}

	*dmatables_region = tmp_region;

	dma_ctx->dmatables_len += new_len;

	return 0;
}
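
/*
 * Illustrative sketch (compiled out; not part of the driver): the
 * grow-and-copy pattern sep_allocate_dmatables_region() implements.
 * Each call behaves like a realloc that also leaves the newly added
 * tail zeroed (the buffer comes from kzalloc, and only the old length
 * is copied into its head).
 */
#if 0
static int sep_example_grow_region(struct sep_device *sep,
				   struct sep_dma_context *dma_ctx,
				   void **region)
{
	int error;

	/* First call: allocates dmatables_len + new_len bytes */
	error = sep_allocate_dmatables_region(sep, region, dma_ctx, 1);
	if (error)
		return error;

	/*
	 * Second call: allocates a larger buffer, copies the old
	 * contents into its head, frees the old buffer, and bumps
	 * dma_ctx->dmatables_len by new_len again.
	 */
	return sep_allocate_dmatables_region(sep, region, dma_ctx, 1);
}
#endif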

/**
 *	sep_wait_transaction - Used for synchronizing transactions
 *	@sep: SEP device
 */
int sep_wait_transaction(struct sep_device *sep)
{
	int error = 0;
	DEFINE_WAIT(wait);

	if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
				&sep->in_use_flags)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no transactions, returning\n",
				current->pid);
		goto end_function_setpid;
	}

	/*
	 * Looping needed even for exclusive waitq entries
	 * due to process wakeup latencies, previous process
	 * might have already created another transaction.
	 */
	for (;;) {
		/*
		 * Exclusive waitq entry, so that only one process is
		 * woken up from the queue at a time.
		 */
		prepare_to_wait_exclusive(&sep->event_transactions,
					  &wait,
					  TASK_INTERRUPTIBLE);
		if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
					  &sep->in_use_flags)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] no transactions, breaking\n",
					current->pid);
			break;
		}
		dev_dbg(&sep->pdev->dev,
			"[PID%d] transactions ongoing, sleeping\n",
				current->pid);
		schedule();
		dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

		if (signal_pending(current)) {
			dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
							current->pid);
			error = -EINTR;
			goto end_function;
		}
	}
end_function_setpid:
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process who has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	/* Only one process is able to progress here at a time */
	sep->pid_doing_transaction = current->pid;

end_function:
	finish_wait(&sep->event_transactions, &wait);

	return error;
}
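
/*
 * Illustrative sketch (compiled out; not part of the driver): the
 * transaction-ownership lifecycle built on sep_wait_transaction().
 * Error handling is abbreviated.
 */
#if 0
static int sep_example_transaction(struct sep_device *sep)
{
	int error;

	/* Blocks (interruptibly) until no other transaction is active */
	error = sep_wait_transaction(sep);
	if (error)
		return error;	/* -EINTR: a signal arrived while waiting */

	/*
	 * From here on sep->pid_doing_transaction == current->pid, so
	 * sep_check_transaction_owner() returns 0 for this process only.
	 */

	/* ... build DMA tables, send the command, poll for the reply ... */

	/*
	 * Ending the transaction clears the lock bits and wakes the next
	 * waiter; see sep_end_transaction_handler() later in this file.
	 */
	return 0;
}
#endif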

/**
 * sep_check_transaction_owner - Checks if current process owns transaction
 * @sep: SEP device
 */
static inline int sep_check_transaction_owner(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
		current->pid,
		sep->pid_doing_transaction);

	if ((sep->pid_doing_transaction == 0) ||
		(current->pid != sep->pid_doing_transaction)) {
		return -EACCES;
	}

	/* We own the transaction */
	return 0;
}

/**
 * sep_dump_message - dump the message that is pending
 * @sep: SEP device
 * This will only print dump if DEBUG is set; it does
 * follow kernel debug print enabling
 */
static void _sep_dump_message(struct sep_device *sep)
{
	int count;

	u32 *p = sep->shared_addr;

	for (count = 0; count < 10 * 4; count += 4)
		dev_dbg(&sep->pdev->dev,
			"[PID%d] Word %d of the message is %x\n",
				current->pid, count/4, *p++);
}

/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 * @size: size of shared area
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
		sep->shared_size,
		&sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] shared memory dma_alloc_coherent failed\n",
				current->pid);
		return -ENOMEM;
	}
	dev_dbg(&sep->pdev->dev,
		"[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
				current->pid,
				sep->shared_size, sep->shared_addr,
				(unsigned long long)sep->shared_bus);
	return 0;
}

/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
				sep->shared_addr, sep->shared_bus);
}

/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns virtual address inside the shared area according
 * to the bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
						dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}
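
/*
 * Illustrative example (hypothetical numbers): the shared area is one
 * contiguous block, so bus/virtual conversion is plain offset
 * arithmetic. If shared_bus == 0x10000000 and shared_addr == 0xc9000000,
 * bus address 0x10000040 maps to virtual 0xc9000040:
 *
 *	virt = sep->shared_addr + (bus  - sep->shared_bus);
 *	bus  = sep->shared_bus  + (virt - sep->shared_addr);
 *
 * sep_shared_area_virt_to_bus() and sep_shared_area_bus_to_virt()
 * further down implement the same relation for the DMA table area.
 */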

/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep;
	struct sep_private_data *priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);

	if (filp->f_flags & O_NONBLOCK)
		return -ENOTSUPP;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	sep = sep_dev;
	priv->device = sep;
	filp->private_data = priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
					current->pid, priv);

	/* Anyone can open; locking takes place at transaction level */
	return 0;
}

/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 * @dma_ctx: dma context
 *
 * Handles the request to free DMA table for synchronic actions
 */
int sep_free_dma_table_data_handler(struct sep_device *sep,
					   struct sep_dma_context **dma_ctx)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler\n",
			current->pid);

	if (!dma_ctx || !(*dma_ctx)) {
		/* No context or context already freed */
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no DMA context or context already freed\n",
				current->pid);

		return 0;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
					current->pid,
					(*dma_ctx)->nr_dcb_creat);

	for (dcb_counter = 0;
	     dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
		dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/*
		 * Output is handled differently. If
		 * this was a secure dma into restricted memory,
		 * then we skip this step altogether as restricted
		 * memory is not available to the o/s at all.
		 */
		if (!(*dma_ctx)->secure_dma && dma->out_map_array) {

			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Free page cache for output */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		/* Again, we do this only for non secure dma */
		if (!(*dma_ctx)->secure_dma && dma->out_page_array) {

			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))

					SetPageDirty(dma->
						out_page_array[count]);

				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/*
		 * Note that here we use in_map_num_entries because we
		 * don't have a page array; the page array is generated
		 * only in the lock_user_pages, which is not called
		 * for kernel crypto, which is what the sg (scatter gather)
		 * is used for exclusively
		 */
		if (dma->src_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
				dma->in_map_num_entries, DMA_TO_DEVICE);
			dma->src_sg = NULL;
		}

		if (dma->dst_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
				dma->in_map_num_entries, DMA_FROM_DEVICE);
			dma->dst_sg = NULL;
		}

		/* Reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	(*dma_ctx)->nr_dcb_creat = 0;
	(*dma_ctx)->num_lli_tables_created = 0;

	kfree(*dma_ctx);
	*dma_ctx = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler end\n",
			current->pid);

	return 0;
}

/**
 * sep_end_transaction_handler - end transaction
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context
 * @call_status: Call status
 *
 * This API handles the end transaction request.
 */
static int sep_end_transaction_handler(struct sep_device *sep,
				       struct sep_dma_context **dma_ctx,
				       struct sep_call_status *call_status,
				       struct sep_queue_info **my_queue_elem)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);

	/*
	 * Extraneous transaction clearing would mess up PM
	 * device usage counters and SEP would get suspended
	 * just before we send a command to SEP in the next
	 * transaction
	 */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
						current->pid);
		return 0;
	}

	/* Update queue status */
	sep_queue_status_remove(sep, my_queue_elem);

	/* Check that all the DMA resources were freed */
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, dma_ctx);

	/* Reset call status for next transaction */
	if (call_status)
		call_status->status = 0;

	/* Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_mark_last_busy(&sep->pdev->dev);
		pm_runtime_put_autosuspend(&sep->pdev->dev);
	}
#endif

	clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
	sep->pid_doing_transaction = 0;

	/* Now it's safe for next process to proceed */
	dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
					current->pid);
	clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
	wake_up(&sep->event_transactions);

	return 0;
}

/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;

	dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);

	sep_end_transaction_handler(sep, dma_ctx, call_status,
		my_queue_elem);

	kfree(filp->private_data);

	return 0;
}

/**
 * sep_mmap -  maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	dma_addr_t bus_addr;
	unsigned long error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);

	/* Set the transaction busy (own the device) */
	/*
	 * Problem for multithreaded applications is that here we're
	 * possibly going to sleep while holding a write lock on
	 * current->mm->mmap_sem, which will cause deadlock for ongoing
	 * transaction trying to create DMA tables
	 */
	error = sep_wait_transaction(sep);
	if (error)
		/* Interrupted by signal, don't clear transaction */
		goto end_function;

	/* Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/*
	 * Check that the size of the mapped range is as the size of the message
	 * shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
					current->pid, sep->shared_addr);

	/* Get bus address */
	bus_addr = sep->shared_bus;

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
						current->pid);
		error = -EAGAIN;
		goto end_function_with_error;
	}

	/* Update call status */
	set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);

	goto end_function;

end_function_with_error:
	/* Clear our transaction */
	sep_end_transaction_handler(sep, NULL, call_status,
		my_queue_elem);

end_function:
	return error;
}

/**
 * sep_poll - poll handler
 * @filp: pointer to struct file
 * @wait: pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lock_irq_flag;

	/* Am I the process that owns the transaction? */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
						current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Check if send command or send_reply were activated previously */
	if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
			  &call_status->status)) {
		dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
						current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
					current->pid);

	poll_wait(filp, &sep->event_interrupt, wait);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] poll: send_ct is %lx reply ct is %lx\n",
			current->pid, sep->send_ct, sep->reply_ct);

	/* Check if error occurred during poll */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if ((retval2 != 0x0) && (retval2 != 0x8)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
						current->pid, retval2);
		mask |= POLLERR;
		goto end_function;
	}

	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll: data ready check (GPR2) %x\n",
				current->pid, retval);

		/* Check if printf request */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP printf request\n",
					current->pid);
			goto end_function;
		}

		/* Check if this is SEP reply or request */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP request\n",
					current->pid);
		} else {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: normal return\n",
					current->pid);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
					current->pid);
			mask |= POLLIN | POLLRDNORM;
			set_bit(SEP_LEGACY_POLL_DONE_OFFSET,
				&call_status->status);
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll; no reply; returning mask of 0\n",
				current->pid);
		mask = 0;
	}

end_function:
	return mask;
}

/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * setting.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr +
		SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}

/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* Address of time as seen by the kernel */

	do_gettimeofday(&time);

	/* Set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
					current->pid, time.tv_sec);
	dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
					current->pid, time_addr);
	dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
					current->pid, sep->shared_addr);

	return time.tv_sec;
}

/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises interrupt to SEP that signals that it has a new
 * command from the host
 *
 * Note that this function does fall under the ioctl lock
 */
int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lock_irq_flag;
	u32 *msg_pool;
	int error = 0;

	/* Basic sanity check; set msg pool to start of shared area */
	msg_pool = (u32 *)sep->shared_addr;
	msg_pool += 2;

	/* Look for start msg token */
	if (*msg_pool != SEP_START_MSG_TOKEN) {
		dev_warn(&sep->pdev->dev, "start message token not present\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Do we have a reasonable size? */
	msg_pool += 1;
	if ((*msg_pool < 2) ||
		(*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {

		dev_warn(&sep->pdev->dev, "invalid message size\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Does the command look reasonable? */
	msg_pool += 1;
	if (*msg_pool < 2) {
		dev_warn(&sep->pdev->dev, "invalid message opcode\n");
		error = -EPROTO;
		goto end_function;
	}

#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
					current->pid,
					sep->pdev->dev.power.runtime_status);
	sep->in_use = 1; /* device is about to be used */
	pm_runtime_get_sync(&sep->pdev->dev);
#endif

	if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep->in_use = 1; /* device is about to be used */
	sep_set_time(sep);

	sep_dump_message(sep);

	/* Update counter */
	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
			current->pid, sep->send_ct, sep->reply_ct);

	/* Send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	return error;
}

/**
 *	sep_crypto_dma -
 *	@sep: pointer to struct sep_device
 *	@sg: pointer to struct scatterlist
 *	@direction: DMA direction
 *	@dma_maps: pointer to place a pointer to array of dma maps
 *	 This is filled in; anything previous there will be lost
 *	 The structure for dma maps is sep_dma_map
 *	@returns number of dma maps on success; negative on error
 *
 *	This creates the dma table from the scatterlist
 *	It is used only for kernel crypto as it works with scatterlists
 *	representation of data buffers
 */
static int sep_crypto_dma(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **dma_maps,
	enum dma_data_direction direction)
{
	struct scatterlist *temp_sg;

	u32 count_segment;
	u32 count_mapped;
	struct sep_dma_map *sep_dma;
	int ct1;

	if (sg->length == 0)
		return 0;

	/* Count the segments */
	count_segment = 0;
	temp_sg = sg;
	while (temp_sg) {
		count_segment += 1;
		temp_sg = scatterwalk_sg_next(temp_sg);
	}
	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x segments in sg\n", count_segment);

	/* DMA map segments */
	count_mapped = dma_map_sg(&sep->pdev->dev, sg,
		count_segment, direction);

	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x maps in sg\n", count_mapped);

	if (count_mapped == 0) {
		dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
		return -ENOMEM;
	}

	sep_dma = kmalloc(sizeof(struct sep_dma_map) *
		count_mapped, GFP_ATOMIC);

	if (sep_dma == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
		return -ENOMEM;
	}

	for_each_sg(sg, temp_sg, count_mapped, ct1) {
		sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
		sep_dma[ct1].size = sg_dma_len(temp_sg);
		dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
			ct1, (unsigned long)sep_dma[ct1].dma_addr,
			(unsigned long)sep_dma[ct1].size);
	}

	*dma_maps = sep_dma;
	return count_mapped;
}
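
/*
 * Illustrative sketch (compiled out; not part of the driver): mapping a
 * caller-provided scatterlist through sep_crypto_dma(). The cleanup
 * mirrors sep_free_dma_table_data_handler(), which unmaps with the
 * mapped-entry count.
 */
#if 0
static int sep_example_crypto_dma(struct sep_device *sep,
				  struct scatterlist *sg)
{
	struct sep_dma_map *maps = NULL;
	int nbr;

	/* On success nbr is the number of coalesced DMA segments */
	nbr = sep_crypto_dma(sep, sg, &maps, DMA_TO_DEVICE);
	if (nbr <= 0)
		return nbr ? nbr : -ENOMEM;

	/* ... use maps[0..nbr-1].dma_addr / maps[0..nbr-1].size ... */

	dma_unmap_sg(&sep->pdev->dev, sg, nbr, DMA_TO_DEVICE);
	kfree(maps);
	return 0;
}
#endif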

/**
 *	sep_crypto_lli -
 *	@sep: pointer to struct sep_device
 *	@sg: pointer to struct scatterlist
 *	@data_size: total data size
 *	@direction: DMA direction
 *	@dma_maps: pointer to place a pointer to array of dma maps
 *	 This is filled in; anything previous there will be lost
 *	 The structure for dma maps is sep_dma_map
 *	@lli_maps: pointer to place a pointer to array of lli maps
 *	 This is filled in; anything previous there will be lost
 *	 The structure for dma maps is sep_dma_map
 *	@returns number of dma maps on success; negative on error
 *
 *	This creates the LLI table from the scatterlist
 *	It is only used for kernel crypto as it works exclusively
 *	with scatterlists (struct scatterlist) representation of
 *	data buffers
 */
static int sep_crypto_lli(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **maps,
	struct sep_lli_entry **llis,
	u32 data_size,
	enum dma_data_direction direction)
{
	int ct1;
	struct sep_lli_entry *sep_lli;
	struct sep_dma_map *sep_map;

	int nbr_ents;

	nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
	if (nbr_ents <= 0) {
		dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
			nbr_ents);
		return nbr_ents;
	}

	sep_map = *maps;

	sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);

	if (sep_lli == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");

		kfree(*maps);
		*maps = NULL;
		return -ENOMEM;
	}

	for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
		sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;

		/* Maximum for page is total data size */
		if (sep_map[ct1].size > data_size)
			sep_map[ct1].size = data_size;

		sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
	}

	*llis = sep_lli;
	return nbr_ents;
}

/**
 *	sep_lock_kernel_pages - map kernel pages for DMA
 *	@sep: pointer to struct sep_device
 *	@kernel_virt_addr: address of data buffer in kernel
 *	@data_size: size of data
 *	@lli_array_ptr: lli array
 *	@in_out_flag: input into device or output from device
 *
 *	This function locks all the physical pages of the kernel virtual buffer
 *	and construct a basic lli array, where each entry holds the physical
 *	page address and the size that application data holds in this page
 *	This function is used only during kernel crypto mod calls from within
 *	the kernel (when ioctl is not used)
 *
 *	This is used only for kernel crypto. Kernel pages
 *	are handled differently as they are done via
 *	scatter / gather lists (struct scatterlist)
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
	unsigned long kernel_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)

{
	int num_pages;
	struct scatterlist *sg;

	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	enum dma_data_direction direction;

	lli_array = NULL;
	map_array = NULL;

	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		direction = DMA_TO_DEVICE;
		sg = dma_ctx->src_sg;
	} else {
		direction = DMA_FROM_DEVICE;
		sg = dma_ctx->dst_sg;
	}

	num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
		data_size, direction);

	if (num_pages <= 0) {
		dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
			num_pages);
		return -ENOMEM;
	}

	/* Put mapped kernel sg into kernel resource array */

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
								NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
			dma_ctx->src_sg;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
								NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
			out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
			dma_ctx->dst_sg;
	}

	return 0;
}

/**
 *	sep_lock_user_pages - lock and map user pages for DMA
 *	@sep: pointer to struct sep_device
 *	@app_virt_addr: user memory data buffer
 *	@data_size: size of data buffer
 *	@lli_array_ptr: lli array
 *	@in_out_flag: input or output to device
 *
 *	This function locks all the physical pages of the application
 *	virtual buffer and construct a basic lli array, where each entry
 *	holds the physical page address and the size that application
 *	data holds in this physical pages
 */
static int sep_lock_user_pages(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)

{
	int error = 0;
	u32 count;
	int result;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of pointers to page */
	struct page **page_array;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
		current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);

	/* Allocate array of pages structure pointers */
	page_array = kmalloc_array(num_pages, sizeof(struct page *),
				   GFP_ATOMIC);
	if (!page_array) {
		error = -ENOMEM;
		goto end_function;
	}

	map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
				  GFP_ATOMIC);
	if (!map_array) {
		error = -ENOMEM;
		goto end_function_with_error1;
	}

	lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
				  GFP_ATOMIC);
	if (!lli_array) {
		error = -ENOMEM;
		goto end_function_with_error2;
	}

	/* Convert the application virtual address into a set of physical */
	result = get_user_pages_fast(app_virt_addr, num_pages,
		((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1), page_array);

	/* Check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] not all pages locked by get_user_pages, result 0x%X, num_pages 0x%X\n",
			current->pid, result, num_pages);
		error = -ENOMEM;
		goto end_function_with_error3;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
					current->pid);

	/*
	 * Fill the array using page array data and
	 * map the pages - this action will also flush the cache as needed
	 */
	for (count = 0; count < num_pages; count++) {
		/* Fill the map array */
		map_array[count].dma_addr =
			dma_map_page(&sep->pdev->dev, page_array[count],
			0, PAGE_SIZE, DMA_BIDIRECTIONAL);

		map_array[count].size = PAGE_SIZE;

		/* Fill the lli array entry */
		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
		lli_array[count].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is (hex) %x\n",
			current->pid, count,
			(unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n",
		current->pid);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] lli_array[0].bus_address is (hex) %08lx, lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n",
			current->pid);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is (hex) %08lx, lli_array[%x].block_size is (hex) %x\n",
			current->pid, num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
								page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
								page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
			out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
	}
	goto end_function;

end_function_with_error3:
	/* Free lli array */
	kfree(lli_array);

end_function_with_error2:
	kfree(map_array);

end_function_with_error1:
	/* Free page array */
	kfree(page_array);

end_function:
	return error;
}
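
/*
 * Illustrative example (hypothetical numbers): how the first and last
 * lli entries are trimmed above for an unaligned user buffer. With
 * PAGE_SIZE = 4096, app_virt_addr = 0x10000300 and data_size = 0x1200:
 *
 *	end_page   = (0x10000300 + 0x1200 - 1) >> 12 = 0x10001
 *	start_page =  0x10000300 >> 12               = 0x10000
 *	num_pages  = 2
 *	lli_array[0].bus_address += 0x300;            offset into page 0
 *	lli_array[0].block_size   = 0x1000 - 0x300  = 0xd00
 *	lli_array[1].block_size   = (0x10000300 + 0x1200) & ~PAGE_MASK
 *	                          = 0x500             tail in last page
 *	total: 0xd00 + 0x500 = 0x1200 = data_size
 */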

/**
 *	sep_lli_table_secure_dma - get lli array for IMR addresses
 *	@sep: pointer to struct sep_device
 *	@app_virt_addr: user memory data buffer
 *	@data_size: size of data buffer
 *	@lli_array_ptr: lli array
 *	@in_out_flag: not used
 *	@dma_ctx: pointer to struct sep_dma_context
 *
 *	This function creates lli tables for outputting data to
 *	IMR memory, which is memory that cannot be accessed by the
 *	x86 processor.
 */
static int sep_lli_table_secure_dma(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)

{
	int error = 0;
	u32 count;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of lli */
	struct sep_lli_entry *lli_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
		current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);

	lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
				  GFP_ATOMIC);
	if (!lli_array)
		return -ENOMEM;

	/*
	 * Fill the lli_array
	 */
	start_page = start_page << PAGE_SHIFT;
	for (count = 0; count < num_pages; count++) {
		/* Fill the lli array entry */
		lli_array[count].bus_address = start_page;
		lli_array[count].block_size = PAGE_SIZE;

		start_page += PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n"
		"lli_array[0].bus_address is (hex) %08lx, lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n"
			"lli_array[%x].bus_address is (hex) %08lx, lli_array[%x].block_size is (hex) %x\n",
			current->pid, num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}
	*lli_array_ptr = lli_array;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;

	return error;
}

/**
 * sep_calculate_lli_table_max_size - size the LLI table
 * @sep: pointer to struct sep_device
 * @lli_in_array_ptr: pointer to the lli array
 * @num_array_entries: number of entries in the lli array
 * @last_table_flag: set to 1 when this is the last table to build
 *
 * This function calculates the size of data that can be inserted into
 * the lli table from this array, such that either the table is full
 * (all entries are entered), or there are no more entries in the
 * lli array
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
	struct sep_lli_entry *lli_in_array_ptr,
	u32 num_array_entries,
	u32 *last_table_flag)
{
	u32 counter;
	/* Table data size */
	u32 table_data_size = 0;
	/* Data size for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * Calculate the data in the out lli table till we fill the whole
	 * table or till the data has ended
	 */
	for (counter = 0;
		(counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
			(counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/*
	 * Check if we reached the last entry,
	 * meaning this is the last table to build,
	 * and no need to check the block alignment
	 */
	if (counter == num_array_entries) {
		/* Set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * Calculate the data size of the next table.
	 * Stop if no entries left or if data size is more the DMA restriction
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * Check if the next table data size is less than the DMA restriction.
	 * If it is - recalculate the current table size, so that the next
	 * table data size will be adequate for DMA
	 */
	if (next_table_data_size &&
		next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
			next_table_data_size);

end_function:
	return table_data_size;
}
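
/*
 * Illustrative worked example (hypothetical constants): with
 * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP = 8 and
 * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE = 0x1000, an lli array of nine
 * 0x1000-byte blocks is sized as follows: the first loop sums the
 * first 7 entries (0x7000) and two entries remain, so last_table_flag
 * stays 0; the remaining entries already hold 0x2000 >= 0x1000, so
 * nothing is pulled back and the function returns 0x7000. Had only
 * 0x800 bytes remained, (0x1000 - 0x800) = 0x800 would be subtracted
 * from the current table so the final table still meets the DMA
 * minimum.
 */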

/**
 * sep_build_lli_table - build an lli array for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: pointer to lli array
 * @lli_table_ptr: pointer to lli table
 * @num_processed_entries_ptr: pointer to number of entries
 * @num_table_entries_ptr: pointer to number of tables
 * @table_data_size: total data size
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry	*lli_array_ptr,
	struct sep_lli_entry	*lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Current table data size */
	u32 curr_table_data_size;
	/* Counter of lli array entry */
	u32 array_counter;

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] build lli table table_data_size: (hex) %x\n",
			current->pid, table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr is %p\n",
				current->pid, lli_table_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address: %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid, lli_table_ptr->block_size);

		/* Check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] curr_table_data_size too large\n",
					current->pid);

			/* Update the size of block in the table */
			lli_table_ptr->block_size =
				cpu_to_le32(lli_table_ptr->block_size) -
				(curr_table_data_size - table_data_size);

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
					cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address is %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid,
				lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Set the info entry to default */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter */
	*num_processed_entries_ptr += array_counter;
}
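
/*
 * Illustrative note (derived from the code above and from
 * sep_debug_print_lli_tables() below): the last ("info") entry of each
 * table does double duty as a link to the next table. Its fields are
 * packed as:
 *
 *	info->bus_address = bus address of the next lli table
 *	info->block_size  = (num_entries_in_next_table << 24) |
 *			    (next_table_data_size & 0xffffff)
 *
 * and a bus_address of 0xffffffff terminates the chain.
 */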

/**
 * sep_shared_area_virt_to_bus - map shared area to bus address
 * @sep: pointer to struct sep_device
 * @virt_address: virtual address to convert
 *
 * This function returns the physical address inside shared area according
 * to the virtual address. It can be either on the external RAM device
 * (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
	void *virt_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
					current->pid, virt_address);
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
		current->pid,
		(unsigned long)
		sep->shared_bus + (virt_address - sep->shared_addr));

	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}

/**
 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
 * @sep: pointer to struct sep_device
 * @bus_address: bus address to convert
 *
 * This function returns the virtual address inside shared area
 * according to the physical address. It can be either on the
 * external RAM device (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
		current->pid,
		(unsigned long)bus_address, (unsigned long)(sep->shared_addr +
			(size_t)(bus_address - sep->shared_bus)));

	return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}

/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to sep_lli_entry
 * @num_table_entries: number of entries
 * @table_data_size: total data size
 *
 * Walk the list of created tables and print all the data
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
#ifdef DEBUG
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
					current->pid);
	if (num_table_entries == 0) {
		dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
			current->pid);
		return;
	}

	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli table %08lx, table_data_size is (hex) %lx\n",
			current->pid, table_count, table_data_size);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] num_table_entries is (hex) %lx\n",
				current->pid, num_table_entries);

		/* Print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
			entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"[PID%d] lli_table_ptr address is %08lx\n",
				current->pid,
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] phys address is %08lx block size is (hex) %x\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}

		/* Point to the info entry */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->block_size is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->physical_address is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);

		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys table_data_size is (hex) %lx num_table_entries is %lx bus_address is%lx\n",
			current->pid,
			table_data_size,
			num_table_entries,
			(unsigned long)lli_table_ptr->bus_address);

		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr->bus_address);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
					current->pid);
#endif
}

/**
 * sep_prepare_empty_lli_table - create a blank LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_addr_ptr: pointer to lli table
 * @num_entries_ptr: pointer to number of entries
 * @table_data_size_ptr: point to table data size
 * @dmatables_region: Optional buffer for DMA tables
 * @dma_ctx: DMA context
 *
 * This function creates empty lli tables when there is no data
 */
static void sep_prepare_empty_lli_table(struct sep_device *sep,
		dma_addr_t *lli_table_addr_ptr,
		u32 *num_entries_ptr,
		u32 *table_data_size_ptr,
		void **dmatables_region,
		struct sep_dma_context *dma_ctx)
{
	struct sep_lli_entry *lli_table_ptr;

	/* Find the area for new table */
	lli_table_ptr =
		(struct sep_lli_entry *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (dmatables_region && *dmatables_region)
		lli_table_ptr = *dmatables_region;

	lli_table_ptr->bus_address = 0;
	lli_table_ptr->block_size = 0;

	lli_table_ptr++;
	lli_table_ptr->bus_address = 0xFFFFFFFF;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter value */
	*lli_table_addr_ptr = sep->shared_bus +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created *
		sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

	/* Set the num of entries and table data size for empty table */
	*num_entries_ptr = 2;
	*table_data_size_ptr = 0;

	/* Update the number of created tables */
	dma_ctx->num_lli_tables_created++;
}

/**
 * sep_prepare_input_dma_table - prepare input DMA mappings
 * @sep: pointer to struct sep_device
 * @data_size: size of data
 * @block_size: block size of the transaction
 * @lli_table_ptr: returned bus address of the first lli table
 * @num_entries_ptr: returned number of entries in the table
 * @table_data_size_ptr: returned data size of the table
 * @is_kva: set for kernel data (kernel crypt io call)
 *
 * This function prepares only input DMA table for synchronic symmetric
 * operations (HASH)
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
	unsigned long app_virt_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx
)
{
	int error = 0;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_entry_ptr;
	/* Array of pointers to page */
	struct sep_lli_entry *lli_array_ptr;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_entry = 0;
	/* Num entries in the virtual buffer */
	u32 sep_lli_entries = 0;
	/* Lli table pointer */
	struct sep_lli_entry *in_lli_table_ptr;
	/* The total data in one table */
	u32 table_data_size = 0;
	/* Flag for last table */
	u32 last_table_flag = 0;
	/* Number of entries in lli table */
	u32 num_entries_in_table = 0;
	/* Next table address */
	void *lli_table_alloc_addr = NULL;
	void *dma_lli_table_alloc_addr = NULL;
	void *dma_in_lli_table_ptr = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] prepare input dma tbl data size: (hex) %x\n",
		current->pid, data_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
					current->pid, block_size);

	/* Initialize the pages pointers */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;

	/* Set the kernel address for first table to be allocated */
	lli_table_alloc_addr = (void *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (data_size == 0) {
		if (dmatables_region) {
			error = sep_allocate_dmatables_region(sep,
						dmatables_region,
						dma_ctx,
						1);
			if (error)
				return error;
		}
		/* Special case - create empty table - 2 entries, zero data */
		sep_prepare_empty_lli_table(sep, lli_table_ptr,
				num_entries_ptr, table_data_size_ptr,
				dmatables_region, dma_ctx);
		goto update_dcb_counter;
	}

	/* Check if the pages are in Kernel Virtual Address layout */
	if (is_kva == true)
		error = sep_lock_kernel_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);
	else
		/*
		 * Lock the pages of the user buffer
		 * and translate them to pages
		 */
		error = sep_lock_user_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);

	if (error)
		goto end_function;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] output sep_in_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);

	info_entry_ptr = NULL;

	sep_lli_entries =
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;

	dma_lli_table_alloc_addr = lli_table_alloc_addr;
	if (dmatables_region) {
		error = sep_allocate_dmatables_region(sep,
					dmatables_region,
					dma_ctx,
					sep_lli_entries);
		if (error)
			goto end_function_error;
		lli_table_alloc_addr = *dmatables_region;
	}

	/* Loop till all the entries in in array are processed */
	while (current_entry < sep_lli_entries) {

		/* Set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;
		dma_in_lli_table_ptr =
			(struct sep_lli_entry *)dma_lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		if (dma_lli_table_alloc_addr >
			((void *)sep->shared_addr +
			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			error = -ENOMEM;
			goto end_function_error;

		}

		/* Update the number of created tables */
		dma_ctx->num_lli_tables_created++;

		/* Calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(sep,
			&lli_array_ptr[current_entry],
			(sep_lli_entries - current_entry),
			&last_table_flag);

		/*
		 * If this is not the last table -
		 * then align it to the block size
		 */
		if (!last_table_flag)
			table_data_size =
				(table_data_size / block_size) * block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] output table_data_size is (hex) %x\n",
				current->pid,
				table_data_size);

		/* Construct input lli table */
		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
			in_lli_table_ptr,
			&current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == NULL) {

			/* Set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
				dma_in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_in_ptr is %08lx\n",
				current->pid,
				(unsigned long)*lli_table_ptr);

		} else {
			/* Update the info entry of the previous in table */
			info_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
							dma_in_lli_table_ptr);
			info_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);
		}
		/* Save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}
	/* Print input tables */
	if (!dmatables_region) {
		sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
			sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
			*num_entries_ptr, *table_data_size_ptr);
	}

	/* The array of the pages */
	kfree(lli_array_ptr);

update_dcb_counter:
	/* Update DCB counter */
	dma_ctx->nr_dcb_creat++;
	goto end_function;

end_function_error:
	/* Free all the allocated resources */
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
	kfree(lli_array_ptr);
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;

end_function:
	return error;
}
/**
 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
 * @sep: pointer to struct sep_device
 * @lli_in_array: LLI array for the input buffer
 * @sep_in_lli_entries: number of entries in lli_in_array
 * @lli_out_array: LLI array for the output buffer
 * @sep_out_lli_entries: number of entries in lli_out_array
 * @block_size: block size of the current operation
 * @lli_table_in_ptr: returns the bus address of the first input table
 * @lli_table_out_ptr: returns the bus address of the first output table
 * @in_num_entries_ptr: returns the number of entries in the first input table
 * @out_num_entries_ptr: returns the number of entries in the first output table
 * @table_data_size_ptr: returns the size of data in the first table
 * @dmatables_region: optional copy area for the DMA tables
 * @dma_ctx: DMA context for the current transaction
 *
 * This function creates the input and output DMA tables for
 * symmetric operations (AES/DES) according to the block
 * size from LLI arrays.
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device.
 */
static int sep_construct_dma_tables_from_lli(
	struct sep_device *sep,
	struct sep_lli_entry *lli_in_array,
	u32 sep_in_lli_entries,
	struct sep_lli_entry *lli_out_array,
	u32 sep_out_lli_entries,
	u32 block_size,
	dma_addr_t *lli_table_in_ptr,
	dma_addr_t *lli_table_out_ptr,
	u32 *in_num_entries_ptr,
	u32 *out_num_entries_ptr,
	u32 *table_data_size_ptr,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx)
{
	/* Points to the area where next lli table can be allocated */
	void *lli_table_alloc_addr = NULL;
	/*
	 * Points to the area in shared region where next lli table
	 * can be allocated
	 */
	void *dma_lli_table_alloc_addr = NULL;
	/* Input lli table in dmatables_region or shared region */
	struct sep_lli_entry *in_lli_table_ptr = NULL;
	/* Input lli table location in the shared region */
	struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
	/* Output lli table in dmatables_region or shared region */
	struct sep_lli_entry *out_lli_table_ptr = NULL;
	/* Output lli table location in the shared region */
	struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_in_entry_ptr = NULL;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_out_entry_ptr = NULL;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_in_entry = 0;
	/* Points to the first entry to be processed in the lli_out_array */
	u32 current_out_entry = 0;
	/* Max size of the input table */
	u32 in_table_data_size = 0;
	/* Max size of the output table */
	u32 out_table_data_size = 0;
	/* Flag that signifies if this is the last table build */
	u32 last_table_flag = 0;
	/* The data size that should be in table */
	u32 table_data_size = 0;
	/* Number of entries in the input table */
	u32 num_entries_in_table = 0;
	/* Number of entries in the output table */
	u32 num_entries_out_table = 0;

	if (!dma_ctx) {
		dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
		return -EINVAL;
	}

	/* Initiate to point after the message area */
	lli_table_alloc_addr = (void *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		(dma_ctx->num_lli_tables_created *
		(sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
	dma_lli_table_alloc_addr = lli_table_alloc_addr;

	if (dmatables_region) {
		/* 2 for both in+out table */
		if (sep_allocate_dmatables_region(sep,
					dmatables_region,
					dma_ctx,
					2*sep_in_lli_entries))
			return -ENOMEM;
		lli_table_alloc_addr = *dmatables_region;
	}
	/* Loop till all the entries in the in array are processed */
	while (current_in_entry < sep_in_lli_entries) {
		/* Set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;
		dma_in_lli_table_ptr =
			(struct sep_lli_entry *)dma_lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* Set the first output tables */
		out_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;
		dma_out_lli_table_ptr =
			(struct sep_lli_entry *)dma_lli_table_alloc_addr;

		/* Check if the DMA table area limit was overrun */
		if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
			((void *)sep->shared_addr +
			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
			return -ENOMEM;
		}

		/* Update the number of the lli tables created */
		dma_ctx->num_lli_tables_created += 2;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* Calculate the maximum size of data for input table */
		in_table_data_size =
			sep_calculate_lli_table_max_size(sep,
				&lli_in_array[current_in_entry],
				(sep_in_lli_entries - current_in_entry),
				&last_table_flag);

		/* Calculate the maximum size of data for output table */
		out_table_data_size =
			sep_calculate_lli_table_max_size(sep,
				&lli_out_array[current_out_entry],
				(sep_out_lli_entries - current_out_entry),
				&last_table_flag);

		/*
		 * If this is not the last table -
		 * then align it to the block size
		 */
		if (!last_table_flag) {
			in_table_data_size = (in_table_data_size /
				block_size) * block_size;
			out_table_data_size = (out_table_data_size /
				block_size) * block_size;
		}

		/* Check which table is bigger */
		table_data_size = in_table_data_size;
		if (table_data_size > out_table_data_size)
			table_data_size = out_table_data_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] construct tables from lli in_table_data_size is (hex) %x\n",
			current->pid, in_table_data_size);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] construct tables from lli out_table_data_size is (hex) %x\n",
			current->pid, out_table_data_size);

		/* Construct input lli table */
		sep_build_lli_table(sep, &lli_in_array[current_in_entry],
			in_lli_table_ptr,
			&current_in_entry,
			&num_entries_in_table,
			table_data_size);

		/* Construct output lli table */
		sep_build_lli_table(sep, &lli_out_array[current_out_entry],
			out_lli_table_ptr,
			&current_out_entry,
			&num_entries_out_table,
			table_data_size);

		/* If info entry is null - this is the first table built */
		if (info_in_entry_ptr == NULL || info_out_entry_ptr == NULL) {
			/* Set the output parameters to physical addresses */
			*lli_table_in_ptr =
			sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);

			*in_num_entries_ptr = num_entries_in_table;

			*lli_table_out_ptr =
				sep_shared_area_virt_to_bus(sep,
				dma_out_lli_table_ptr);

			*out_num_entries_ptr = num_entries_out_table;
			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_in_ptr is %08lx\n",
				current->pid,
				(unsigned long)*lli_table_in_ptr);
			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_out_ptr is %08lx\n",
				current->pid,
				(unsigned long)*lli_table_out_ptr);
		} else {
			/* Update the info entry of the previous in table */
			info_in_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
				dma_in_lli_table_ptr);

			info_in_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);

			/* Update the info entry of the previous out table */
			info_out_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
				dma_out_lli_table_ptr);

			info_out_entry_ptr->block_size =
				((num_entries_out_table) << 24) |
				(table_data_size);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_in_ptr:%08lx %08x\n",
				current->pid,
				(unsigned long)info_in_entry_ptr->bus_address,
				info_in_entry_ptr->block_size);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_out_ptr: %08lx %08x\n",
				current->pid,
				(unsigned long)info_out_entry_ptr->bus_address,
				info_out_entry_ptr->block_size);
		}

		/* Save the pointer to the info entry of the current tables */
		info_in_entry_ptr = in_lli_table_ptr +
			num_entries_in_table - 1;
		info_out_entry_ptr = out_lli_table_ptr +
			num_entries_out_table - 1;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] output num_entries_out_table is %x\n",
			current->pid,
			(u32)num_entries_out_table);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] output info_in_entry_ptr is %lx\n",
			current->pid,
			(unsigned long)info_in_entry_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] output info_out_entry_ptr is %lx\n",
			current->pid,
			(unsigned long)info_out_entry_ptr);
	}

	/* Print input tables */
	if (!dmatables_region) {
		sep_debug_print_lli_tables(
			sep,
			(struct sep_lli_entry *)
			sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
			*in_num_entries_ptr,
			*table_data_size_ptr);
	}

	/* Print output tables */
	if (!dmatables_region) {
		sep_debug_print_lli_tables(
			sep,
			(struct sep_lli_entry *)
			sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
			*out_num_entries_ptr,
			*table_data_size_ptr);
	}

	return 0;
}
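
/*
 * Illustrative sketch (not driver code): the block-size alignment above
 * truncates every non-final table to a whole number of cipher blocks,
 * so only the last table may hold a partial block.  Assuming a 16-byte
 * AES block:
 *
 *	u32 table_data_size = 1000;
 *	u32 block_size = 16;
 *
 *	table_data_size = (table_data_size / block_size) * block_size;
 *
 * table_data_size is now 992 (62 whole blocks); the remaining 8 bytes
 * are deferred to the next table.
 */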
/**
 * sep_prepare_input_output_dma_table - prepare DMA I/O table
 * @sep: pointer to struct sep_device
 * @app_virt_in_addr: virtual address of the input buffer
 * @app_virt_out_addr: virtual address of the output buffer
 * @data_size: size of the data
 * @block_size: block size of the current operation
 * @lli_table_in_ptr: returns the bus address of the first input table
 * @lli_table_out_ptr: returns the bus address of the first output table
 * @in_num_entries_ptr: returns the number of entries in the first input table
 * @out_num_entries_ptr: returns the number of entries in the first output table
 * @table_data_size_ptr: returns the size of data in the first table
 * @is_kva: set for kernel data; used only for kernel crypto module
 * @dmatables_region: optional copy area for the DMA tables
 * @dma_ctx: DMA context for the current transaction
 *
 * This function builds input and output DMA tables for synchronic
 * symmetric operations (AES, DES, HASH). It also checks that each table
 * is of the modular block size.
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device.
 */
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
	unsigned long app_virt_in_addr,
	unsigned long app_virt_out_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_in_ptr,
	dma_addr_t *lli_table_out_ptr,
	u32 *in_num_entries_ptr,
	u32 *out_num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	/* Array of pointers of page */
	struct sep_lli_entry *lli_in_array;
	/* Array of pointers of page */
	struct sep_lli_entry *lli_out_array;

	if (!dma_ctx) {
		error = -EINVAL;
		goto end_function;
	}

	if (data_size == 0) {
		/* Prepare empty table for input and output */
		if (dmatables_region) {
			error = sep_allocate_dmatables_region(
					sep,
					dmatables_region,
					dma_ctx,
					2);
			if (error)
				goto end_function;
		}
		sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
			in_num_entries_ptr, table_data_size_ptr,
			dmatables_region, dma_ctx);

		sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
			out_num_entries_ptr, table_data_size_ptr,
			dmatables_region, dma_ctx);

		goto update_dcb_counter;
	}

	/* Initialize the pages pointers */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;

	/* Lock the pages of the buffer and translate them to pages */
	if (is_kva) {
		dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
			current->pid);
		error = sep_lock_kernel_pages(sep, app_virt_in_addr,
				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
				dma_ctx);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_kernel_pages for input virtual buffer failed\n",
				current->pid);
			goto end_function;
		}

		dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
			current->pid);
		error = sep_lock_kernel_pages(sep, app_virt_out_addr,
				data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
				dma_ctx);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_kernel_pages for output virtual buffer failed\n",
				current->pid);
			goto end_function_free_lli_in;
		}

	} else {
		dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
			current->pid);
		error = sep_lock_user_pages(sep, app_virt_in_addr,
				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
				dma_ctx);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_user_pages for input virtual buffer failed\n",
				current->pid);
			goto end_function;
		}

		if (dma_ctx->secure_dma) {
			/* secure_dma requires use of non accessible memory */
			dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
				current->pid);
			error = sep_lli_table_secure_dma(sep,
				app_virt_out_addr, data_size, &lli_out_array,
				SEP_DRIVER_OUT_FLAG, dma_ctx);
			if (error) {
				dev_warn(&sep->pdev->dev,
					"[PID%d] secure dma table setup for output virtual buffer failed\n",
					current->pid);
				goto end_function_free_lli_in;
			}
		} else {
			/* For normal, non-secure dma */
			dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
				current->pid);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] Locking user output pages\n",
				current->pid);

			error = sep_lock_user_pages(sep, app_virt_out_addr,
				data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
				dma_ctx);
			if (error) {
				dev_warn(&sep->pdev->dev,
					"[PID%d] sep_lock_user_pages for output virtual buffer failed\n",
					current->pid);
				goto end_function_free_lli_in;
			}
		}
	}

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
		current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	/* Call the function that creates table from the lli arrays */
	dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
		current->pid);
	error = sep_construct_dma_tables_from_lli(
			sep, lli_in_array,
			dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
								in_num_pages,
			lli_out_array,
			dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
								out_num_pages,
			block_size, lli_table_in_ptr, lli_table_out_ptr,
			in_num_entries_ptr, out_num_entries_ptr,
			table_data_size_ptr, dmatables_region, dma_ctx);

	if (error) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] sep_construct_dma_tables_from_lli failed\n",
			current->pid);
		goto end_function_with_error;
	}

	kfree(lli_out_array);
	kfree(lli_in_array);

update_dcb_counter:
	/* Update DCB counter */
	dma_ctx->nr_dcb_creat++;

	goto end_function;

end_function_with_error:
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	kfree(lli_out_array);

end_function_free_lli_in:
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	kfree(lli_in_array);

end_function:
	return error;
}
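
/*
 * Illustrative sketch (not driver code): the labels above follow the
 * usual kernel goto-unwind idiom, releasing only what was successfully
 * acquired, in reverse order.  Reduced to its shape (helper names are
 * hypothetical):
 *
 *	error = lock_input();
 *	if (error)
 *		goto end_function;
 *	error = lock_output();
 *	if (error)
 *		goto end_function_free_lli_in;
 *	...
 *	end_function_with_error:	release output resources
 *	end_function_free_lli_in:	release input resources
 *	end_function:			return error
 */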
/**
 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
 * @sep: pointer to struct sep_device
 * @app_in_address: unsigned long; for data buffer in (user space)
 * @app_out_address: unsigned long; for data buffer out (user space)
 * @data_in_size: u32; for size of data
 * @block_size: u32; for block size
 * @tail_block_size: u32; for size of tail block
 * @isapplet: bool; to indicate external app
 * @is_kva: bool; kernel buffer; only used for kernel crypto module
 * @secure_dma: indicates whether this is secure_dma using IMR
 * @dcb_region: DCB region to use, or NULL for the shared area default
 * @dmatables_region: optional copy area for the DMA tables
 * @dma_ctx: DMA context for the current transaction
 * @src_sg: source scatterlist (kernel crypto only)
 * @dst_sg: destination scatterlist (kernel crypto only)
 *
 * This function prepares the linked DMA tables and puts the
 * address for the linked list of tables into a DCB (data control
 * block) the address of which is known by the SEP hardware.
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device.
 */
int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
	unsigned long app_in_address,
	unsigned long app_out_address,
	u32 data_in_size,
	u32 block_size,
	u32 tail_block_size,
	bool isapplet,
	bool is_kva,
	bool secure_dma,
	struct sep_dcblock *dcb_region,
	void **dmatables_region,
	struct sep_dma_context **dma_ctx,
	struct scatterlist *src_sg,
	struct scatterlist *dst_sg)
{
	int error = 0;
	/* Size of tail */
	u32 tail_size = 0;
	/* Address of the created DCB table */
	struct sep_dcblock *dcb_table_ptr = NULL;
	/* The physical address of the first input DMA table */
	dma_addr_t in_first_mlli_address = 0;
	/* Number of entries in the first input DMA table */
	u32 in_first_num_entries = 0;
	/* The physical address of the first output DMA table */
	dma_addr_t out_first_mlli_address = 0;
	/* Number of entries in the first output DMA table */
	u32 out_first_num_entries = 0;
	/* Data in the first input/output table */
	u32 first_data_size = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
		current->pid, app_in_address);

	dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
		current->pid, app_out_address);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
		current->pid, data_in_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
		current->pid, block_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
		current->pid, tail_block_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
		current->pid, isapplet);

	dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
		current->pid, is_kva);

	dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
		current->pid, src_sg);

	dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
		current->pid, dst_sg);

	if (!dma_ctx) {
		dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
			current->pid);
		error = -EINVAL;
		goto end_function;
	}

	if (*dma_ctx) {
		/* In case there are multiple DCBs for this transaction */
		dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
			current->pid);
	} else {
		*dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
		if (!(*dma_ctx)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] Not enough memory for DMA context\n",
				current->pid);
			error = -ENOMEM;
			goto end_function;
		}
		dev_dbg(&sep->pdev->dev,
			"[PID%d] Created DMA context addr at 0x%p\n",
			current->pid, *dma_ctx);
	}

	(*dma_ctx)->secure_dma = secure_dma;

	/* these are for kernel crypto only */
	(*dma_ctx)->src_sg = src_sg;
	(*dma_ctx)->dst_sg = dst_sg;

	if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
		/* No more DCBs to allocate */
		dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
			current->pid);
		error = -ENOSPC;
		goto end_function_error;
	}

	/* Allocate new DCB */
	if (dcb_region) {
		dcb_table_ptr = dcb_region;
	} else {
		dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
			SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
			((*dma_ctx)->nr_dcb_creat *
						sizeof(struct sep_dcblock)));
	}

	/* Set the default values in the DCB */
	dcb_table_ptr->input_mlli_address = 0;
	dcb_table_ptr->input_mlli_num_entries = 0;
	dcb_table_ptr->input_mlli_data_size = 0;
	dcb_table_ptr->output_mlli_address = 0;
	dcb_table_ptr->output_mlli_num_entries = 0;
	dcb_table_ptr->output_mlli_data_size = 0;
	dcb_table_ptr->tail_data_size = 0;
	dcb_table_ptr->out_vr_tail_pt = 0;

	if (isapplet) {

		/* Check if there is enough data for DMA operation */
		if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
			if (copy_from_user(dcb_table_ptr->tail_data,
				(void __user *)app_in_address,
				data_in_size)) {
				error = -EFAULT;
				goto end_function_error;
			}

			dcb_table_ptr->tail_data_size = data_in_size;

			/* Set the output user-space address for mem2mem op */
			if (app_out_address)
				dcb_table_ptr->out_vr_tail_pt =
					(aligned_u64)app_out_address;

			/*
			 * Update both data length parameters in order to avoid
			 * second data copy and allow building of empty mlli
			 * tables
			 */
			tail_size = 0x0;
			data_in_size = 0x0;

		} else {
			if (!app_out_address) {
				tail_size = data_in_size % block_size;
				if (!tail_size) {
					if (tail_block_size == block_size)
						tail_size = block_size;
				}
			} else {
				tail_size = 0;
			}

			if (tail_size) {
				if (tail_size > sizeof(dcb_table_ptr->tail_data)) {
					error = -EINVAL;
					goto end_function_error;
				}
				/* We have tail data - copy it to DCB */
				if (copy_from_user(dcb_table_ptr->tail_data,
					(void __user *)(app_in_address +
					data_in_size - tail_size), tail_size)) {
					error = -EFAULT;
					goto end_function_error;
				}
				if (app_out_address)
					/*
					 * Calculate the output address
					 * according to tail data size
					 */
					dcb_table_ptr->out_vr_tail_pt =
						(aligned_u64)app_out_address +
						data_in_size - tail_size;

				/* Save the real tail data size */
				dcb_table_ptr->tail_data_size = tail_size;
				/*
				 * Update the data size without the tail
				 * data size AKA data for the dma
				 */
				data_in_size = (data_in_size - tail_size);
			}
		}
	}

	/* Check if we need to build only input table or input/output */
	if (app_out_address) {
		/* Prepare input/output tables */
		error = sep_prepare_input_output_dma_table(sep,
				app_in_address,
				app_out_address,
				data_in_size,
				block_size,
				&in_first_mlli_address,
				&out_first_mlli_address,
				&in_first_num_entries,
				&out_first_num_entries,
				&first_data_size,
				is_kva,
				dmatables_region,
				*dma_ctx);
	} else {
		/* Prepare input tables */
		error = sep_prepare_input_dma_table(sep,
				app_in_address,
				data_in_size,
				block_size,
				&in_first_mlli_address,
				&in_first_num_entries,
				&first_data_size,
				is_kva,
				dmatables_region,
				*dma_ctx);
	}

	if (error) {
		dev_warn(&sep->pdev->dev,
			"prepare DMA table call failed from prepare DCB call\n");
		goto end_function_error;
	}

	/* Set the DCB values */
	dcb_table_ptr->input_mlli_address = in_first_mlli_address;
	dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
	dcb_table_ptr->input_mlli_data_size = first_data_size;
	dcb_table_ptr->output_mlli_address = out_first_mlli_address;
	dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
	dcb_table_ptr->output_mlli_data_size = first_data_size;

	goto end_function;

end_function_error:
	kfree(*dma_ctx);
	*dma_ctx = NULL;

end_function:
	return error;
}
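
/*
 * Illustrative sketch (not driver code): the tail handling above keeps
 * any partial block out of the DMA transfer and routes it through the
 * DCB's tail_data instead.  Assuming a 16-byte block size:
 *
 *	u32 data_in_size = 100, block_size = 16;
 *	u32 tail_size = data_in_size % block_size;	4 bytes
 *
 *	data_in_size -= tail_size;			96 bytes go via DMA
 *
 * The 4 tail bytes are copied into dcb_table_ptr->tail_data and are
 * processed after the DMA portion completes.
 */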
/**
 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
 * @sep: pointer to struct sep_device
 * @isapplet: indicates external application (used for kernel access)
 * @is_kva: indicates kernel addresses (only used for kernel crypto)
 * @dma_ctx: DMA context for the current transaction
 *
 * This function frees the DMA tables and DCB
 */
static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
	bool is_kva, struct sep_dma_context **dma_ctx)
{
	struct sep_dcblock *dcb_table_ptr;
	unsigned long pt_hold;
	void *tail_pt;

	int i = 0;
	int error = 0;
	int error_temp = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
		current->pid);

	if (!dma_ctx || !*dma_ctx) /* nothing to be done here*/
		return 0;

	if (!(*dma_ctx)->secure_dma && isapplet) {
		dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
			current->pid);

		/* Tail stuff is only for non secure_dma */
		/* Set pointer to first DCB table */
		dcb_table_ptr = (struct sep_dcblock *)
			(sep->shared_addr +
			SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);

		/*
		 * Go over each DCB and see if
		 * tail pointer must be updated
		 */
		for (i = 0; i < (*dma_ctx)->nr_dcb_creat;
						i++, dcb_table_ptr++) {
			if (dcb_table_ptr->out_vr_tail_pt) {
				pt_hold = (unsigned long)dcb_table_ptr->
						out_vr_tail_pt;
				tail_pt = (void *)pt_hold;
				error_temp = copy_to_user(
					(void __user *)tail_pt,
					dcb_table_ptr->tail_data,
					dcb_table_ptr->tail_data_size);
				if (error_temp) {
					/* Release the DMA resource */
					error = -EFAULT;
					break;
				}
			}
		}
	}

	/* Free the output pages, if any */
	sep_free_dma_table_data_handler(sep, dma_ctx);

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
		current->pid);

	return error;
}
/**
 * sep_prepare_dcb_handler - prepare a control block
 * @sep: pointer to struct sep_device
 * @arg: pointer to user parameters
 * @secure_dma: indicate whether we are using secure_dma on IMR
 * @dma_ctx: DMA context for the current transaction
 *
 * This function will retrieve the RAR buffer physical addresses, type
 * & size corresponding to the RAR handles provided in the buffers vector.
 */
static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
				   bool secure_dma,
				   struct sep_dma_context **dma_ctx)
{
	int error;
	/* Command arguments */
	static struct build_dcb_struct command_args;

	/* Get the command arguments */
	if (copy_from_user(&command_args, (void __user *)arg,
					sizeof(struct build_dcb_struct))) {
		error = -EFAULT;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev,
		"[PID%d] prep dcb handler app_in_address is %08llx\n",
		current->pid, command_args.app_in_address);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] app_out_address is %08llx\n",
		current->pid, command_args.app_out_address);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] data_size is %x\n",
		current->pid, command_args.data_in_size);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] block_size is %x\n",
		current->pid, command_args.block_size);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] tail block_size is %x\n",
		current->pid, command_args.tail_block_size);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] is_applet is %x\n",
		current->pid, command_args.is_applet);

	if (!command_args.app_in_address) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] null app_in_address\n", current->pid);
		error = -EINVAL;
		goto end_function;
	}

	error = sep_prepare_input_output_dma_table_in_dcb(sep,
			(unsigned long)command_args.app_in_address,
			(unsigned long)command_args.app_out_address,
			command_args.data_in_size, command_args.block_size,
			command_args.tail_block_size,
			command_args.is_applet, false,
			secure_dma, NULL, NULL, dma_ctx, NULL, NULL);

end_function:
	return error;
}
/**
 * sep_free_dcb_handler - free control block resources
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context for the current transaction
 *
 * This function frees the DCB resources and updates the needed
 * user-space buffers.
 */
static int sep_free_dcb_handler(struct sep_device *sep,
				struct sep_dma_context **dma_ctx)
{
	if (!dma_ctx || !(*dma_ctx)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no dma context defined, nothing to free\n",
			current->pid);
		return -EINVAL;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
		current->pid,
		(*dma_ctx)->nr_dcb_creat);

	return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
}
/**
 * sep_ioctl - ioctl handler for sep device
 * @filp: pointer to struct file
 * @cmd: command
 * @arg: pointer to argument structure
 *
 * Implement the ioctl methods available on the SEP device.
 */
static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
		current->pid, cmd);
	dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
		current->pid, *dma_ctx);

	/* Make sure we own this device */
	error = sep_check_transaction_owner(sep);
	if (error) {
		dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
			current->pid);
		goto end_function;
	}

	/* Check that sep_mmap has been called before */
	if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
				&call_status->status)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] mmap not called\n", current->pid);
		error = -EPROTO;
		goto end_function;
	}

	/* Check that the command is for SEP device */
	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
		error = -ENOTTY;
		goto end_function;
	}

	switch (cmd) {
	case SEP_IOCSENDSEPCOMMAND:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
			current->pid);
		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				  &call_status->status)) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] send msg already done\n",
				current->pid);
			error = -EPROTO;
			goto end_function;
		}
		/* Send command to SEP */
		error = sep_send_command_handler(sep);
		if (!error)
			set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				&call_status->status);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
			current->pid);
		break;
	case SEP_IOCENDTRANSACTION:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCENDTRANSACTION start\n",
			current->pid);
		error = sep_end_transaction_handler(sep, dma_ctx, call_status,
						    my_queue_elem);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCENDTRANSACTION end\n",
			current->pid);
		break;
	case SEP_IOCPREPAREDCB:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCPREPAREDCB start\n",
			current->pid);
		/* fall-through */
	case SEP_IOCPREPAREDCB_SECURE_DMA:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
			current->pid);
		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				  &call_status->status)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] dcb prep needed before send msg\n",
				current->pid);
			error = -EPROTO;
			goto end_function;
		}

		if (!arg) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] dcb null arg\n", current->pid);
			error = -EINVAL;
			goto end_function;
		}

		if (cmd == SEP_IOCPREPAREDCB) {
			/* No secure dma */
			dev_dbg(&sep->pdev->dev,
				"[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
				current->pid);

			error = sep_prepare_dcb_handler(sep, arg, false,
				dma_ctx);
		} else {
			/* Secure dma */
			dev_dbg(&sep->pdev->dev,
				"[PID%d] SEP_IOC_POC (with secure_dma)\n",
				current->pid);

			error = sep_prepare_dcb_handler(sep, arg, true,
				dma_ctx);
		}
		dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
			current->pid);
		break;
	case SEP_IOCFREEDCB:
		dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
			current->pid);
		/* fall-through */
	case SEP_IOCFREEDCB_SECURE_DMA:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
			current->pid);
		error = sep_free_dcb_handler(sep, dma_ctx);
		dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
			current->pid);
		break;
	default:
		error = -ENOTTY;
		dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
			current->pid);
		break;
	}

end_function:
	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);

	return error;
}
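
/*
 * Illustrative user-space sketch (not driver code, error handling
 * omitted): the legacy flow enforced above is mmap first, then optional
 * DCB preparation, then the send-message command.  The device node name
 * and mapping size are assumptions here (they come from SEP_DEV_NAME
 * and the userland library):
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	void *shared = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, 0);
 *	... build message in the shared area ...
 *	ioctl(fd, SEP_IOCPREPAREDCB, &dcb_args);	optional
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND);
 *	... wait, read reply from the shared area ...
 *	ioctl(fd, SEP_IOCENDTRANSACTION);
 */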
/**
 * sep_inthandler - interrupt handler for sep device
 * @irq: interrupt
 * @dev_id: device id
 */
static irqreturn_t sep_inthandler(int irq, void *dev_id)
{
	unsigned long lock_irq_flag;
	u32 reg_val, reg_val2 = 0;
	struct sep_device *sep = dev_id;
	irqreturn_t int_error = IRQ_HANDLED;

	/* Are we in power save? */
#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
		dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
		return IRQ_NONE;
	}
#endif

	if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
		dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
		return IRQ_NONE;
	}

	/* Read the IRR register to check if this is SEP interrupt */
	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);

	dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);

	if (reg_val & (0x1 << 13)) {

		/* Lock and update the counter of reply messages */
		spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
		sep->reply_ct++;
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

		dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
			sep->send_ct, sep->reply_ct);

		/* Is this a kernel client request */
		if (sep->in_kernel) {
			tasklet_schedule(&sep->finish_tasklet);
			goto finished_interrupt;
		}

		/* Is this printf or daemon request? */
		reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"SEP Interrupt - GPR2 is %08x\n", reg_val2);

		clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);

		if ((reg_val2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "int: printf request\n");
		} else if (reg_val2 >> 31) {
			dev_dbg(&sep->pdev->dev, "int: daemon request\n");
		} else {
			dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
			wake_up(&sep->event_interrupt);
		}
	} else {
		dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
		int_error = IRQ_NONE;
	}

finished_interrupt:

	if (int_error == IRQ_HANDLED)
		sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);

	return int_error;
}
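
/*
 * Illustrative sketch (not driver code): the GPR2 tests above decode
 * the reply type from the two top bits of the register:
 *
 *	u32 gpr2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
 *
 *	if ((gpr2 >> 30) & 0x1)		bit 30 set: printf request
 *	else if (gpr2 >> 31)		bit 31 set: daemon request
 *	else				otherwise: normal SEP reply
 *
 * The low 30 bits carry the reply counter that is masked with
 * 0x3FFFFFFF when send_ct/reply_ct are initialized.
 */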
/**
 * sep_reconfig_shared_area - reconfigure shared area
 * @sep: pointer to struct sep_device
 *
 * Reconfig the shared area between HOST and SEP - needed in case
 * the DX_CC_Init function was called before OS loading.
 */
static int sep_reconfig_shared_area(struct sep_device *sep)
{
	int ret_val;

	/* use to limit waiting for SEP */
	unsigned long end_time;

	/* Send the new SHARED MESSAGE AREA to the SEP */
	dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
		(unsigned long long)sep->shared_bus);

	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);

	/* Poll for SEP response */
	ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);

	end_time = jiffies + (WAIT_TIME * HZ);

	while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
		(ret_val != sep->shared_bus))
		ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);

	/* Check the return value (register) */
	if (ret_val != sep->shared_bus) {
		dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
		dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
		ret_val = -ENOMEM;
	} else {
		ret_val = 0;
	}

	dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");

	return ret_val;
}
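
/*
 * Illustrative sketch (not driver code): the loop above is the standard
 * jiffies-bounded busy wait.  The same pattern in isolation, with a
 * hypothetical read_hw() helper:
 *
 *	unsigned long end_time = jiffies + (WAIT_TIME * HZ);
 *	u32 val = read_hw();
 *
 *	while (time_before(jiffies, end_time) && val != expected)
 *		val = read_hw();
 *
 * time_before() handles jiffies wraparound correctly, which a plain
 * "jiffies < end_time" comparison would not.
 */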
/**
 * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables contexts
 *					into use
 * @sep: SEP device
 * @dcb_region: DCB region copy
 * @dmatables_region: MLLI/DMA tables copy
 * @dma_ctx: DMA context for current transaction
 */
ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
					struct sep_dcblock **dcb_region,
					void **dmatables_region,
					struct sep_dma_context *dma_ctx)
{
	void *dmaregion_free_start = NULL;
	void *dmaregion_free_end = NULL;
	void *dcbregion_free_start = NULL;
	void *dcbregion_free_end = NULL;
	ssize_t error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
		current->pid);

	if (1 > dma_ctx->nr_dcb_creat) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
			 current->pid, dma_ctx->nr_dcb_creat);
		error = -EINVAL;
		goto end_function;
	}

	dmaregion_free_start = sep->shared_addr
				+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
	dmaregion_free_end = dmaregion_free_start
				+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

	if (dmaregion_free_start
	     + dma_ctx->dmatables_len > dmaregion_free_end) {
		error = -ENOMEM;
		goto end_function;
	}
	memcpy(dmaregion_free_start,
	       *dmatables_region,
	       dma_ctx->dmatables_len);
	/* Free MLLI table copy */
	kfree(*dmatables_region);
	*dmatables_region = NULL;

	/* Copy thread's DCB table copy to DCB table region */
	dcbregion_free_start = sep->shared_addr +
				SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
	dcbregion_free_end = dcbregion_free_start +
				(SEP_MAX_NUM_SYNC_DMA_OPS *
					sizeof(struct sep_dcblock)) - 1;

	if (dcbregion_free_start
	     + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
	     > dcbregion_free_end) {
		error = -ENOMEM;
		goto end_function;
	}

	memcpy(dcbregion_free_start,
	       *dcb_region,
	       dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));

	/* Print the tables */
	dev_dbg(&sep->pdev->dev, "activate: input table\n");
	sep_debug_print_lli_tables(sep,
		(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
		(*dcb_region)->input_mlli_address),
		(*dcb_region)->input_mlli_num_entries,
		(*dcb_region)->input_mlli_data_size);

	dev_dbg(&sep->pdev->dev, "activate: output table\n");
	sep_debug_print_lli_tables(sep,
		(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
		(*dcb_region)->output_mlli_address),
		(*dcb_region)->output_mlli_num_entries,
		(*dcb_region)->output_mlli_data_size);

	dev_dbg(&sep->pdev->dev,
		 "[PID%d] printing activated tables\n", current->pid);

end_function:
	kfree(*dmatables_region);
	*dmatables_region = NULL;

	return error;
}
/**
 * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
 * @sep: SEP device
 * @dcb_region: DCB region buf to create for current transaction
 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
 * @dma_ctx: DMA context buf to create for current transaction
 * @user_dcb_args: User arguments for DCB/MLLI creation
 * @num_dcbs: Number of DCBs to create
 * @secure_dma: Indicate use of IMR restricted memory secure dma
 */
static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
			struct sep_dcblock **dcb_region,
			void **dmatables_region,
			struct sep_dma_context **dma_ctx,
			const struct build_dcb_struct __user *user_dcb_args,
			const u32 num_dcbs, bool secure_dma)
{
	int error = 0;
	int i = 0;
	struct build_dcb_struct *dcb_args = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
		current->pid);

	if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
		error = -EINVAL;
		goto end_function;
	}

	if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid number of dcbs 0x%08X\n",
			 current->pid, num_dcbs);
		error = -EINVAL;
		goto end_function;
	}

	dcb_args = kcalloc(num_dcbs, sizeof(struct build_dcb_struct),
			   GFP_KERNEL);
	if (!dcb_args) {
		error = -ENOMEM;
		goto end_function;
	}

	if (copy_from_user(dcb_args,
			user_dcb_args,
			num_dcbs * sizeof(struct build_dcb_struct))) {
		error = -EFAULT;
		goto end_function;
	}

	/* Allocate thread-specific memory for DCB */
	*dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
			      GFP_KERNEL);
	if (!(*dcb_region)) {
		error = -ENOMEM;
		goto end_function;
	}

	/* Prepare DCB and MLLI table into the allocated regions */
	for (i = 0; i < num_dcbs; i++) {
		error = sep_prepare_input_output_dma_table_in_dcb(sep,
				(unsigned long)dcb_args[i].app_in_address,
				(unsigned long)dcb_args[i].app_out_address,
				dcb_args[i].data_in_size,
				dcb_args[i].block_size,
				dcb_args[i].tail_block_size,
				dcb_args[i].is_applet,
				false, secure_dma,
				*dcb_region, dmatables_region,
				dma_ctx,
				NULL,
				NULL);
		if (error) {
			dev_warn(&sep->pdev->dev,
				 "[PID%d] dma table creation failed\n",
				 current->pid);
			goto end_function;
		}

		if (dcb_args[i].app_in_address != 0)
			(*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
	}

end_function:
	kfree(dcb_args);
	return error;
}
/**
 * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table
 *						context
 * @sep: SEP device
 * @dcb_region: DCB region buf to create for current transaction
 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
 * @dma_ctx: DMA context buf to create for current transaction
 * @dcb_data: Kernel arguments for DCB/MLLI creation
 * @num_dcbs: Number of DCBs to create
 *
 * This does the same thing as sep_create_dcb_dmatables_context
 * except that it is used only for the kernel crypto operation. It is
 * separate because there is no user data involved; the dcb data structure
 * is specific for kernel crypto (build_dcb_struct_kernel)
 */
int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
			struct sep_dcblock **dcb_region,
			void **dmatables_region,
			struct sep_dma_context **dma_ctx,
			const struct build_dcb_struct_kernel *dcb_data,
			const u32 num_dcbs)
{
	int error = 0;
	int i = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
		current->pid);

	if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
		error = -EINVAL;
		goto end_function;
	}

	if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid number of dcbs 0x%08X\n",
			 current->pid, num_dcbs);
		error = -EINVAL;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
		current->pid, num_dcbs);

	/* Allocate thread-specific memory for DCB */
	*dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
			      GFP_KERNEL);
	if (!(*dcb_region)) {
		error = -ENOMEM;
		goto end_function;
	}

	/* Prepare DCB and MLLI table into the allocated regions */
	for (i = 0; i < num_dcbs; i++) {
		error = sep_prepare_input_output_dma_table_in_dcb(sep,
				(unsigned long)dcb_data->app_in_address,
				(unsigned long)dcb_data->app_out_address,
				dcb_data->data_in_size,
				dcb_data->block_size,
				dcb_data->tail_block_size,
				dcb_data->is_applet,
				true, false,
				*dcb_region, dmatables_region,
				dma_ctx,
				dcb_data->src_sg,
				dcb_data->dst_sg);
		if (error) {
			dev_warn(&sep->pdev->dev,
				 "[PID%d] dma table creation failed\n",
				 current->pid);
			goto end_function;
		}
	}

end_function:
	return error;
}
/**
 * sep_activate_msgarea_context - Takes the message area context into use
 * @sep: SEP device
 * @msg_region: Message area context buf
 * @msg_len: Message area context buffer size
 */
static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
					    void **msg_region,
					    const size_t msg_len)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
		current->pid);

	if (!msg_region || !(*msg_region) ||
	    SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid act msgarea len 0x%08zX\n",
			 current->pid, msg_len);
		return -EINVAL;
	}

	memcpy(sep->shared_addr, *msg_region, msg_len);

	return 0;
}
/**
 * sep_create_msgarea_context - Creates message area context
 * @sep: SEP device
 * @msg_region: Msg area region buf to create for current transaction
 * @msg_user: Content for msg area region from user
 * @msg_len: Message area size
 */
static ssize_t sep_create_msgarea_context(struct sep_device *sep,
					  void **msg_region,
					  const void __user *msg_user,
					  const size_t msg_len)
{
	int error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
		current->pid);

	if (!msg_region ||
	    !msg_user ||
	    SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
	    SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid creat msgarea len 0x%08zX\n",
			 current->pid, msg_len);
		error = -EINVAL;
		goto end_function;
	}

	/* Allocate thread-specific memory for message buffer */
	*msg_region = kzalloc(msg_len, GFP_KERNEL);
	if (!(*msg_region)) {
		error = -ENOMEM;
		goto end_function;
	}

	/* Copy input data to write() to allocated message buffer */
	if (copy_from_user(*msg_region, msg_user, msg_len)) {
		error = -EFAULT;
		goto end_function;
	}

end_function:
	if (error && msg_region) {
		kfree(*msg_region);
		*msg_region = NULL;
	}

	return error;
}
/**
 * sep_read - Returns results of an operation for fastcall interface
 * @filp: File pointer
 * @buf_user: User buffer for storing results
 * @count_user: User buffer size
 * @offset: File offset, not supported
 *
 * The implementation does not support reading in chunks, all data must be
 * consumed during a single read system call.
 */
static ssize_t sep_read(struct file *filp,
			char __user *buf_user, size_t count_user,
			loff_t *offset)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	ssize_t error = 0, error_tmp = 0;

	/* Am I the process that owns the transaction? */
	error = sep_check_transaction_owner(sep);
	if (error) {
		dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
			current->pid);
		goto end_function;
	}

	/* Checks that user has called necessary apis */
	if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
			&call_status->status)) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] fastcall write not called\n",
			 current->pid);
		error = -EPROTO;
		goto end_function_error;
	}

	if (!buf_user) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] null user buffer\n",
			 current->pid);
		error = -EINVAL;
		goto end_function_error;
	}

	/* Wait for SEP to finish */
	wait_event(sep->event_interrupt,
		   test_bit(SEP_WORKING_LOCK_BIT,
			    &sep->in_use_flags) == 0);

	sep_dump_message(sep);

	dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
		current->pid, count_user);

	/* In case user has allocated bigger buffer */
	if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
		count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;

	if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
		error = -EFAULT;
		goto end_function_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
	error = count_user;

end_function_error:
	/* Copy possible tail data to user and free DCB and MLLIs */
	error_tmp = sep_free_dcb_handler(sep, dma_ctx);
	if (error_tmp)
		dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
			current->pid);

	/* End the transaction, wakeup pending ones */
	error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
		my_queue_elem);
	if (error_tmp)
		dev_warn(&sep->pdev->dev,
			"[PID%d] ending transaction failed\n",
			current->pid);

end_function:
	return error;
}
/**
 * sep_fastcall_args_get - Gets fastcall params from user
 * @sep: SEP device
 * @args: Parameters buffer
 * @buf_user: User buffer for operation parameters
 * @count_user: User buffer size
 */
static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
					    struct sep_fastcall_hdr *args,
					    const char __user *buf_user,
					    const size_t count_user)
{
	ssize_t error = 0;
	size_t actual_count = 0;

	if (!buf_user) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] null user buffer\n",
			 current->pid);
		error = -EINVAL;
		goto end_function;
	}

	if (count_user < sizeof(struct sep_fastcall_hdr)) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] too small message size 0x%08zX\n",
			 current->pid, count_user);
		error = -EINVAL;
		goto end_function;
	}

	if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
		error = -EFAULT;
		goto end_function;
	}

	if (SEP_FC_MAGIC != args->magic) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid fastcall magic 0x%08X\n",
			 current->pid, args->magic);
		error = -EINVAL;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
		current->pid, args->num_dcbs);
	dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
		current->pid, args->msg_len);

	if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
	    SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid message length\n",
			 current->pid);
		error = -EINVAL;
		goto end_function;
	}

	actual_count = sizeof(struct sep_fastcall_hdr)
			+ args->msg_len
			+ (args->num_dcbs * sizeof(struct build_dcb_struct));

	if (actual_count != count_user) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] inconsistent message sizes 0x%08zX vs 0x%08zX\n",
			 current->pid, actual_count, count_user);
		error = -EMSGSIZE;
		goto end_function;
	}

end_function:
	return error;
}
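
/*
 * Illustrative sketch (not driver code): the size check above implies
 * the wire format of a fastcall write buffer:
 *
 *	+------------------------------+
 *	| struct sep_fastcall_hdr      |  magic, num_dcbs, msg_len, ...
 *	+------------------------------+
 *	| struct build_dcb_struct x N  |  N == hdr.num_dcbs
 *	+------------------------------+
 *	| message area (msg_len bytes) |
 *	+------------------------------+
 *
 *	actual_count = sizeof(struct sep_fastcall_hdr)
 *			+ hdr.msg_len
 *			+ hdr.num_dcbs * sizeof(struct build_dcb_struct);
 */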
/**
 * sep_write - Starts an operation for fastcall interface
 * @filp: File pointer
 * @buf_user: User buffer for operation parameters
 * @count_user: User buffer size
 * @offset: File offset, not supported
 *
 * The implementation does not support writing in chunks,
 * all data must be given during a single write system call.
 */
static ssize_t sep_write(struct file *filp,
			 const char __user *buf_user, size_t count_user,
			 loff_t *offset)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context *dma_ctx = NULL;
	struct sep_fastcall_hdr call_hdr = {0};
	void *msg_region = NULL;
	void *dmatables_region = NULL;
	struct sep_dcblock *dcb_region = NULL;
	ssize_t error = 0;
	struct sep_queue_info *my_queue_elem = NULL;
	bool my_secure_dma; /* are we using secure_dma (IMR)? */

	dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
		current->pid, sep);
	dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
		current->pid, private_data);

	error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
	if (error)
		goto end_function;

	buf_user += sizeof(struct sep_fastcall_hdr);

	if (call_hdr.secure_dma == 0)
		my_secure_dma = false;
	else
		my_secure_dma = true;

	/*
	 * Controlling driver memory usage by limiting amount of
	 * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
	 * of threads can progress further at a time
	 */
	dev_dbg(&sep->pdev->dev,
		"[PID%d] waiting for double buffering region access\n",
		current->pid);
	error = down_interruptible(&sep->sep_doublebuf);
	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
		current->pid);
	if (error) {
		/* Signal received */
		goto end_function_error;
	}

	/*
	 * Prepare contents of the shared area regions for
	 * the operation into temporary buffers
	 */
	if (0 < call_hdr.num_dcbs) {
		error = sep_create_dcb_dmatables_context(sep,
				&dcb_region,
				&dmatables_region,
				&dma_ctx,
				(const struct build_dcb_struct __user *)
					buf_user,
				call_hdr.num_dcbs, my_secure_dma);
		if (error)
			goto end_function_error_doublebuf;

		buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
	}

	error = sep_create_msgarea_context(sep,
					   &msg_region,
					   buf_user,
					   call_hdr.msg_len);
	if (error)
		goto end_function_error_doublebuf;

	dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
		current->pid);
	my_queue_elem = sep_queue_status_add(sep,
			((struct sep_msgarea_hdr *)msg_region)->opcode,
			(dma_ctx) ? dma_ctx->input_data_len : 0,
			current->pid,
			current->comm, sizeof(current->comm));

	if (!my_queue_elem) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] updating queue status error\n", current->pid);
		error = -ENOMEM;
		goto end_function_error_doublebuf;
	}

	/* Wait until current process gets the transaction */
	error = sep_wait_transaction(sep);

	if (error) {
		/* Interrupted by signal, don't clear transaction */
		dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
			current->pid);
		sep_queue_status_remove(sep, &my_queue_elem);
		goto end_function_error_doublebuf;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
		current->pid);
	private_data->my_queue_elem = my_queue_elem;

	/* Activate shared area regions for the transaction */
	error = sep_activate_msgarea_context(sep, &msg_region,
					     call_hdr.msg_len);
	if (error)
		goto end_function_error_clear_transact;

	sep_dump_message(sep);

	if (0 < call_hdr.num_dcbs) {
		error = sep_activate_dcb_dmatables_context(sep,
				&dcb_region,
				&dmatables_region,
				dma_ctx);
		if (error)
			goto end_function_error_clear_transact;
	}

	/* Send command to SEP */
	error = sep_send_command_handler(sep);
	if (error)
		goto end_function_error_clear_transact;

	/* Store DMA context for the transaction */
	private_data->dma_ctx = dma_ctx;
	/* Update call status */
	set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
	error = count_user;

	up(&sep->sep_doublebuf);
	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
		current->pid);

	goto end_function;

end_function_error_clear_transact:
	sep_end_transaction_handler(sep, &dma_ctx, call_status,
				    &private_data->my_queue_elem);

end_function_error_doublebuf:
	up(&sep->sep_doublebuf);
	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
		current->pid);

end_function_error:
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, &dma_ctx);

end_function:
	kfree(dcb_region);
	kfree(dmatables_region);
	kfree(msg_region);

	return error;
}
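
/*
 * Illustrative user-space sketch (not driver code, error handling
 * omitted): a fastcall transaction is one write() carrying the header,
 * DCB arguments, and message, followed by one read() for the results.
 * Header field names beyond magic/num_dcbs/msg_len/secure_dma are
 * assumptions:
 *
 *	struct sep_fastcall_hdr hdr = {
 *		.magic = SEP_FC_MAGIC,
 *		.num_dcbs = 1,
 *		.msg_len = msg_len,
 *	};
 *	... pack hdr + one build_dcb_struct + message into buf ...
 *	write(fd, buf, sizeof(hdr)
 *		+ sizeof(struct build_dcb_struct) + msg_len);
 *	read(fd, reply, reply_len);	blocks until SEP finishes
 */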
/**
 * sep_seek - Handler for seek system call
 * @filp: File pointer
 * @offset: File offset
 * @origin: Options for offset
 *
 * Fastcall interface does not support seeking, all reads
 * and writes are from/to offset zero
 */
static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
{
	return -ENOSYS;
}
/**
 * sep_file_operations - file operation on sep device
 * @sep_ioctl: ioctl handler from user space call
 * @sep_poll: poll handler
 * @sep_open: handles sep device open request
 * @sep_release: handles sep device release request
 * @sep_mmap: handles memory mapping requests
 * @sep_read: handles read request on sep device
 * @sep_write: handles write request on sep device
 * @sep_seek: handles seek request on sep device
 */
static const struct file_operations sep_file_operations = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = sep_ioctl,
	.poll = sep_poll,
	.open = sep_open,
	.release = sep_release,
	.mmap = sep_mmap,
	.read = sep_read,
	.write = sep_write,
	.llseek = sep_seek,
};
/**
 * sep_sysfs_read - read sysfs entry per given arguments
 * @filp: file pointer
 * @kobj: kobject pointer
 * @attr: binary file attributes
 * @buf: read to this buffer
 * @pos: offset to read
 * @count: amount of data to read
 *
 * This function is to read sysfs entries for sep driver per given arguments.
 */
static ssize_t
sep_sysfs_read(struct file *filp, struct kobject *kobj,
		struct bin_attribute *attr,
		char *buf, loff_t pos, size_t count)
{
	unsigned long lck_flags;
	size_t nleft = count;
	struct sep_device *sep = sep_dev;
	struct sep_queue_info *queue_elem = NULL;
	u32 queue_num = 0;
	u32 i = 1;

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

	queue_num = sep->sep_queue_num;
	if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
		queue_num = SEP_DOUBLEBUF_USERS_LIMIT;


	if (count < sizeof(queue_num)
			+ (queue_num * sizeof(struct sep_queue_data))) {
		spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
		return -EINVAL;
	}

	memcpy(buf, &queue_num, sizeof(queue_num));
	buf += sizeof(queue_num);
	nleft -= sizeof(queue_num);

	list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
		if (i++ > queue_num)
			break;

		memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
		nleft -= sizeof(queue_elem->data);
		buf += sizeof(queue_elem->data);
	}
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	return count - nleft;
}
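
/*
 * Illustrative user-space sketch (not driver code): the binary sysfs
 * file produced above starts with a u32 queue length followed by that
 * many struct sep_queue_data records.  The exact sysfs path depends on
 * the misc device registration and is an assumption here:
 *
 *	int fd = open(".../queue_status", O_RDONLY);
 *	uint32_t n;
 *
 *	read(fd, buf, buf_size);
 *	memcpy(&n, buf, sizeof(n));	number of queued transactions
 *	... n records follow at buf + sizeof(n) ...
 */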
/**
 * bin_attributes - defines attributes for queue_status
 * @attr: attributes (name & permissions)
 * @read: function pointer to read this file
 * @size: maximum size of binary attribute
 */
static const struct bin_attribute queue_status = {
	.attr = {.name = "queue_status", .mode = 0444},
	.read = sep_sysfs_read,
	.size = sizeof(u32)
		+ (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
};
/**
 * sep_register_driver_with_fs - register misc devices
 * @sep: pointer to struct sep_device
 *
 * This function registers the driver with the file system
 */
static int sep_register_driver_with_fs(struct sep_device *sep)
{
	int ret_val;

	sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
	sep->miscdev_sep.name = SEP_DEV_NAME;
	sep->miscdev_sep.fops = &sep_file_operations;

	ret_val = misc_register(&sep->miscdev_sep);
	if (ret_val) {
		dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
			ret_val);
		return ret_val;
	}

	ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
								&queue_status);
	if (ret_val) {
		dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
			ret_val);
		misc_deregister(&sep->miscdev_sep);
		return ret_val;
	}

	return ret_val;
}
/**
 * sep_probe - probe a matching PCI device
 * @pdev: pci_device
 * @ent: pci_device_id
 *
 * Attempt to set up and configure a SEP device that has been
 * discovered by the PCI layer. Allocates all required resources.
 */
static int sep_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	int error = 0;
	struct sep_device *sep = NULL;

	if (sep_dev != NULL) {
		dev_dbg(&pdev->dev, "only one SEP supported.\n");
		return -EBUSY;
	}

	/* Enable the device */
	error = pci_enable_device(pdev);
	if (error) {
		dev_warn(&pdev->dev, "error enabling pci device\n");
		goto end_function;
	}

	/* Allocate the sep_device structure for this device */
	sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
	if (sep_dev == NULL) {
		error = -ENOMEM;
		goto end_function_disable_device;
	}

	/*
	 * We're going to use another variable for actually
	 * working with the device; this way, if we have
	 * multiple devices in the future, it would be easier
	 * to make appropriate changes
	 */
	sep = sep_dev;

	sep->pdev = pci_dev_get(pdev);

	init_waitqueue_head(&sep->event_transactions);
	init_waitqueue_head(&sep->event_interrupt);
	spin_lock_init(&sep->snd_rply_lck);
	spin_lock_init(&sep->sep_queue_lock);
	sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);

	INIT_LIST_HEAD(&sep->sep_queue_status);

	dev_dbg(&sep->pdev->dev,
		"sep probe: PCI obtained, device being prepared\n");

	/* Set up our register area */
	sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
	if (!sep->reg_physical_addr) {
		dev_warn(&sep->pdev->dev, "Error getting register start\n");
		error = -ENODEV;
		goto end_function_free_sep_dev;
	}

	sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
	if (!sep->reg_physical_end) {
		dev_warn(&sep->pdev->dev, "Error getting register end\n");
		error = -ENODEV;
		goto end_function_free_sep_dev;
	}

	sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
		(size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
	if (!sep->reg_addr) {
		dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
		error = -ENODEV;
		goto end_function_free_sep_dev;
	}

	dev_dbg(&sep->pdev->dev,
		"Register area start %llx end %llx virtual %p\n",
		(unsigned long long)sep->reg_physical_addr,
		(unsigned long long)sep->reg_physical_end,
		sep->reg_addr);

	/* Allocate the shared area */
	sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
		SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
		SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
		SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;

	if (sep_map_and_alloc_shared_area(sep)) {
		error = -ENOMEM;
		/* Allocation failed */
		goto end_function_error;
	}

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR register - open only GPR 2 */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));

	/* Read send/receive counters from SEP */
	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
	sep->reply_ct &= 0x3FFFFFFF;
	sep->send_ct = sep->reply_ct;

	/* Get the interrupt line */
	error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
		"sep_driver", sep);

	if (error)
		goto end_function_deallocate_sep_shared_area;

	/* The new chip requires a shared area reconfigure */
	error = sep_reconfig_shared_area(sep);
	if (error)
		goto end_function_free_irq;

	/* Finally magic up the device nodes */
	/* Register driver with the fs */
	error = sep_register_driver_with_fs(sep);

	if (error) {
		dev_err(&sep->pdev->dev, "error registering dev file\n");
		goto end_function_free_irq;
	}

	sep->in_use = 0; /* through touching the device */
#ifdef SEP_ENABLE_RUNTIME_PM
	pm_runtime_put_noidle(&sep->pdev->dev);
	pm_runtime_allow(&sep->pdev->dev);
	pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
		SUSPEND_DELAY * 1000);
	pm_runtime_use_autosuspend(&sep->pdev->dev);
	pm_runtime_mark_last_busy(&sep->pdev->dev);
	sep->power_save_setup = 1;
#endif
	/* register kernel crypto driver */
#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
	error = sep_crypto_setup();
	if (error) {
		dev_err(&sep->pdev->dev, "crypto setup failed\n");
		goto end_function_free_irq;
	}
#endif
	goto end_function;

end_function_free_irq:
	free_irq(pdev->irq, sep);

end_function_deallocate_sep_shared_area:
	/* De-allocate shared area */
	sep_unmap_and_free_shared_area(sep);

end_function_error:
	iounmap(sep->reg_addr);

end_function_free_sep_dev:
	pci_dev_put(sep_dev->pdev);
	kfree(sep_dev);
	sep_dev = NULL;

end_function_disable_device:
	pci_disable_device(pdev);

end_function:
	return error;
}
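
/*
 * Illustrative sketch (not driver code): the IMR value written in
 * sep_probe() unmasks only the GPR2 interrupt bit:
 *
 *	u32 imr = ~(0x1 << 13);		all bits masked except bit 13
 *
 * which matches the IRR test in sep_inthandler():
 *
 *	if (reg_val & (0x1 << 13))	interrupt really came from SEP
 */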
/**
 * sep_remove - handles removing device from pci subsystem
 * @pdev: pointer to pci device
 *
 * This function will handle removing our sep device from pci subsystem on exit
 * or unloading this module. It should free up all used resources, and unmap if
 * any memory regions mapped.
 */
static void sep_remove(struct pci_dev *pdev)
{
	struct sep_device *sep = sep_dev;

	/* Unregister from fs */
	misc_deregister(&sep->miscdev_sep);

	/* Unregister from kernel crypto */
#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
	sep_crypto_takedown();
#endif
	/* Free the irq */
	free_irq(sep->pdev->irq, sep);

	/* Free the shared area */
	sep_unmap_and_free_shared_area(sep_dev);
	iounmap(sep_dev->reg_addr);

#ifdef SEP_ENABLE_RUNTIME_PM
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_forbid(&sep->pdev->dev);
		pm_runtime_get_noresume(&sep->pdev->dev);
	}
#endif
	pci_dev_put(sep_dev->pdev);
	kfree(sep_dev);
	sep_dev = NULL;
}
/* Initialize struct pci_device_id for our driver */
static const struct pci_device_id sep_pci_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
	{0}
};

/* Export our pci_device_id structure to user space */
MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
#ifdef SEP_ENABLE_RUNTIME_PM

/**
 * sep_pci_resume - resume routine while waking up from S3 state
 * @dev: pointer to sep device
 *
 * This function is used to wake up the sep driver while the system awakes
 * from S3 state, i.e. suspend to RAM. The RAM contents are intact.
 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
 */
static int sep_pci_resume(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pci resume called\n");

	if (sep->power_state == SEP_DRIVER_POWERON)
		return 0;

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR register - open only GPR 2 */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));

	/* Read send/receive counters from SEP */
	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
	sep->reply_ct &= 0x3FFFFFFF;
	sep->send_ct = sep->reply_ct;

	sep->power_state = SEP_DRIVER_POWERON;

	return 0;
}

/**
 * sep_pci_suspend - suspend routine while going to S3 state
 * @dev: pointer to sep device
 *
 * This function is used to suspend the sep driver while the system goes to
 * S3 state, i.e. suspend to RAM. The RAM is intact and powered during this
 * suspend.
 * Notes - revisit with more understanding of pm, ICR/IMR
 */
static int sep_pci_suspend(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pci suspend called\n");

	if (sep->in_use == 1)
		return -EAGAIN;

	sep->power_state = SEP_DRIVER_POWEROFF;

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR to block all */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);

	return 0;
}

/**
 * sep_pm_runtime_resume - runtime resume routine
 * @dev: pointer to sep device
 *
 * Notes - revisit with more understanding of pm, ICR/IMR & counters
 */
static int sep_pm_runtime_resume(struct device *dev)
{
	u32 retval2;
	u32 delay_count;
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");

	/*
	 * Wait until the SCU boot is ready.
	 * This is done by iterating SCU_DELAY_ITERATION (10
	 * microseconds each) up to SCU_DELAY_MAX (50) times.
	 * This bit can be set at a random time that is less
	 * than 500 microseconds after each power resume.
	 */
	retval2 = 0;
	delay_count = 0;
	while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
		retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
		retval2 &= 0x00000008;
		if (!retval2) {
			udelay(SCU_DELAY_ITERATION);
			delay_count += 1;
		}
	}

	if (!retval2) {
		dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
		return -EINVAL;
	}

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR register - open only GPR 2 */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));

	/* Read send/receive counters from SEP */
	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
	sep->reply_ct &= 0x3FFFFFFF;
	sep->send_ct = sep->reply_ct;

	return 0;
}

/**
 * sep_pm_runtime_suspend - runtime suspend routine
 * @dev: pointer to sep device
 *
 * Notes - revisit with more understanding of pm
 */
static int sep_pm_runtime_suspend(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
	return 0;
}

/**
 * sep_pm - power management for sep driver
 * @sep_pm_runtime_resume: resume - no communication with cpu & main memory
 * @sep_pm_runtime_suspend: suspend - no communication with cpu & main memory
 * @sep_pci_suspend: suspend - main memory is still ON
 * @sep_pci_resume: resume - main memory is still ON
 */
static const struct dev_pm_ops sep_pm = {
	.runtime_resume = sep_pm_runtime_resume,
	.runtime_suspend = sep_pm_runtime_suspend,
	.resume = sep_pci_resume,
	.suspend = sep_pci_suspend,
};
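
/*
 * The PM core selects callbacks from this table by context: .suspend and
 * .resume run on system sleep transitions (S3), while .runtime_suspend and
 * .runtime_resume run when the idle device is autosuspended and when it is
 * next needed. The table is handed to the PCI core through the .driver.pm
 * field of sep_pci_driver below.
 */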
#endif /* SEP_ENABLE_RUNTIME_PM */

/**
 * sep_pci_driver - registers this device with pci subsystem
 * @name: name identifier for this driver
 * @sep_pci_id_tbl: pointer to struct pci_device_id table
 * @sep_probe: pointer to probe function in PCI driver
 * @sep_remove: pointer to remove function in PCI driver
 */
static struct pci_driver sep_pci_driver = {
#ifdef SEP_ENABLE_RUNTIME_PM
	.driver = {
		.pm = &sep_pm,
	},
#endif
	.name = "sep_sec_driver",
	.id_table = sep_pci_id_tbl,
	.probe = sep_probe,
	.remove = sep_remove
};

module_pci_driver(sep_pci_driver);
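
/*
 * module_pci_driver() above generates the module init/exit boilerplate;
 * it is roughly equivalent to:
 *
 *	static int __init sep_pci_driver_init(void)
 *	{
 *		return pci_register_driver(&sep_pci_driver);
 *	}
 *	module_init(sep_pci_driver_init);
 *
 *	static void __exit sep_pci_driver_exit(void)
 *	{
 *		pci_unregister_driver(&sep_pci_driver);
 *	}
 *	module_exit(sep_pci_driver_exit);
 */
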
MODULE_LICENSE("GPL");