/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};
/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit-lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

/* Ever-increasing value to produce unique unit numbers */
static atomic_t ccp_unit_ordinal;
unsigned int ccp_increment_unit_ordinal(void)
{
	return atomic_inc_return(&ccp_unit_ordinal);
}
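/* Illustrative sketch (not part of the driver): any reader that follows
 * the lock hierarchy documented above takes the unit lock first and the
 * RR lock second, e.g.:
 *
 *	unsigned long flags;
 *
 *	read_lock_irqsave(&ccp_unit_lock, flags);
 *	spin_lock(&ccp_rr_lock);
 *	...advance the ccp_rr round-robin pointer...
 *	spin_unlock(&ccp_rr_lock);
 *	read_unlock_irqrestore(&ccp_unit_lock, flags);
 *
 * ccp_get_device() below is the in-tree instance of this pattern.
 */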
/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}
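/* Illustrative sketch (hypothetical probe path, not part of this file):
 * a bus front-end typically allocates the device and then publishes it:
 *
 *	struct ccp_device *ccp = ccp_alloc_struct(dev);
 *
 *	if (!ccp)
 *		return -ENOMEM;
 *	...set up command queues, IRQs, vdata...
 *	ccp_add_device(ccp);
 *
 * After ccp_add_device() returns, ccp_get_device() can hand the unit
 * out to ccp_enqueue_cmd() callers.
 */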
/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr == ccp) {
		/* ccp_unit_lock is read/write; any read access
		 * will be suspended while we make changes to the
		 * list and RR pointer.
		 */
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
	}
	list_del(&ccp->entry);
	if (list_empty(&ccp_units))
		ccp_rr = NULL;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}
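/* Worked example (illustrative): with units A -> B -> C on ccp_units
 * and ccp_rr == B, ccp_del_device(B) advances ccp_rr to C before
 * unlinking B; had ccp_rr been the last entry C, it would wrap to A;
 * deleting the only remaining unit NULLs ccp_rr.
 */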
static struct ccp_device *ccp_get_device(void)
{
	unsigned long flags;
	struct ccp_device *dp = NULL;

	/* We round-robin through the unit list.
	 * The (ccp_rr) pointer refers to the next unit to use.
	 */
	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);
		dp = ccp_rr;
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}
/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&ccp_unit_lock, flags);
	ret = list_empty(&ccp_units);
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);
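/* Illustrative caller sketch (hypothetical, not part of this file): a
 * consumer such as a crypto front-end can bail out early when no unit
 * is registered:
 *
 *	if (ccp_present() != 0)
 *		return -ENODEV;
 */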
/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list;
 * otherwise zero if no CCP device is present.
 */
unsigned int ccp_version(void)
{
	struct ccp_device *dp;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
		ret = dp->vdata->version;
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);
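/* Illustrative caller sketch (hypothetical; CCP_VERSION_NEEDED is a
 * made-up constant, not part of the driver): a consumer that requires
 * a newer engine can gate on the reported version:
 *
 *	if (ccp_version() < CCP_VERSION_NEEDED)
 *		return -ENODEV;
 */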
/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * queued only if the CCP_CMD_MAY_BACKLOG flag is set, and the
 * return code will be -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 * the return code is -EINPROGRESS or
 * the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp = ccp_get_device();
	unsigned long flags;
	unsigned int i;
	int ret;

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		ret = -EBUSY;
		if (cmd->flags & CCP_CMD_MAY_BACKLOG)
			list_add_tail(&cmd->entry, &ccp->backlog);
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
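/* Illustrative caller sketch (hypothetical; my_done and my_ctx are
 * made-up names, not part of the driver): a submitter fills in a
 * ccp_cmd, supplies a completion callback, and treats -EINPROGRESS
 * (queued) and -EBUSY with CCP_CMD_MAY_BACKLOG set (backlogged) as
 * success:
 *
 *	static void my_done(void *data, int err)
 *	{
 *		if (err && err != -EINPROGRESS)
 *			pr_err("ccp op failed: %d\n", err);
 *	}
 *
 *	cmd->flags = CCP_CMD_MAY_BACKLOG;
 *	cmd->callback = my_done;
 *	cmd->data = my_ctx;
 *	ret = ccp_enqueue_cmd(cmd);
 *	if (ret != -EINPROGRESS && ret != -EBUSY)
 *		...handle the error...
 */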
static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}
static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);
	complete(&tdata->completion);
}
/**
 * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue
 *
 * @data: thread-specific data
 */
int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
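/* Illustrative sketch (hypothetical queue bring-up, not part of this
 * file; the "id" field name is an assumption): device setup code would
 * start one thread per command queue and park the task pointer where
 * ccp_enqueue_cmd() can wake it:
 *
 *	struct task_struct *k;
 *
 *	k = kthread_run(ccp_cmd_queue_thread, &ccp->cmd_q[i],
 *			"%s-q%u", ccp->name, ccp->cmd_q[i].id);
 *	if (IS_ERR(k))
 *		...unwind and fail the probe...
 *	ccp->cmd_q[i].kthread = k;
 */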
/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
	struct ccp_device *ccp;

	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;
	ccp->dev = dev;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->ksb_mutex);
	ccp->ksb_count = KSB_COUNT;

	ccp->ord = ccp_increment_unit_ordinal();
	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);

	return ccp;
}
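/* Worked example (illustrative): the first allocated unit gets ordinal
 * 1 (atomic_inc_return() on the zero-initialized counter), so its names
 * come out as "ccp-1" and "ccp-1-rng"; the next unit gets "ccp-2", and
 * so on for the lifetime of the module.
 */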
#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}
#endif
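/* Illustrative sketch (hypothetical suspend path, not part of this
 * file): a bus front-end sets ccp->suspending, wakes every queue
 * thread so it can park itself, and then waits for all of them:
 *
 *	ccp->suspending = 1;
 *	for (i = 0; i < ccp->cmd_q_count; i++)
 *		wake_up_process(ccp->cmd_q[i].kthread);
 *	wait_event_interruptible(ccp->suspend_queue,
 *				 ccp_queues_suspended(ccp));
 */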
static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
	int ret;

	ret = ccp_pci_init();
	if (ret)
		return ret;

	/* Don't leave the driver loaded if init failed */
	if (ccp_present() != 0) {
		ccp_pci_exit();
		return -ENODEV;
	}

	return 0;
#endif

#ifdef CONFIG_ARM64
	int ret;

	ret = ccp_platform_init();
	if (ret)
		return ret;

	/* Don't leave the driver loaded if init failed */
	if (ccp_present() != 0) {
		ccp_platform_exit();
		return -ENODEV;
	}

	return 0;
#endif

	return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
	ccp_pci_exit();
#endif

#ifdef CONFIG_ARM64
	ccp_platform_exit();
#endif
}
module_init(ccp_mod_init);
module_exit(ccp_mod_exit);