/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit-lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

/* Ever-increasing value to produce unique unit numbers */
static atomic_t ccp_unit_ordinal;
static unsigned int ccp_increment_unit_ordinal(void)
{
	return atomic_inc_return(&ccp_unit_ordinal);
}

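/* A minimal sketch (illustrative only, not driver code) of the lock
 * ordering described above: take ccp_unit_lock for reading first, then
 * ccp_rr_lock to examine or advance the round-robin pointer:
 *
 *	unsigned long flags;
 *
 *	read_lock_irqsave(&ccp_unit_lock, flags);
 *	if (!list_empty(&ccp_units)) {
 *		spin_lock(&ccp_rr_lock);
 *		... examine or advance ccp_rr ...
 *		spin_unlock(&ccp_rr_lock);
 *	}
 *	read_unlock_irqrestore(&ccp_unit_lock, flags);
 */
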
/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr == ccp) {
		/* ccp_unit_lock is read/write; any read access
		 * will be suspended while we make changes to the
		 * list and RR pointer.
		 */
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units,
						  struct ccp_device, entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
	}
	list_del(&ccp->entry);
	if (list_empty(&ccp_units))
		ccp_rr = NULL;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

int ccp_register_rng(struct ccp_device *ccp)
{
	int ret = 0;

	dev_dbg(ccp->dev, "Registering RNG...\n");
	/* Register an RNG */
	ccp->hwrng.name = ccp->rngname;
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret)
		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

	return ret;
}

void ccp_unregister_rng(struct ccp_device *ccp)
{
	if (ccp->hwrng.name)
		hwrng_unregister(&ccp->hwrng);
}

static struct ccp_device *ccp_get_device(void)
{
	unsigned long flags;
	struct ccp_device *dp = NULL;

	/* We round-robin through the unit list.
	 * The (ccp_rr) pointer refers to the next unit to use.
	 */
	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);
		dp = ccp_rr;
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units,
						  struct ccp_device, entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&ccp_unit_lock, flags);
	ret = list_empty(&ccp_units);
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);

/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list;
 * otherwise zero if no CCP device is present.
 */
unsigned int ccp_version(void)
{
	struct ccp_device *dp;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
		ret = dp->vdata->version;
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);

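/* Illustrative sketch (not part of this file) of how a client, such as
 * a crypto layer, might gate itself on the two exports above;
 * ccp_available() and the version threshold are hypothetical:
 *
 *	static int ccp_available(void)
 *	{
 *		if (ccp_present() != 0)
 *			return -ENODEV;
 *		if (ccp_version() < CCP_VERSION(3, 0))
 *			return -ENODEV;
 *		return 0;
 *	}
 */
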
/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will
 * result in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp = ccp_get_device();
	unsigned long flags;
	unsigned int i;
	int ret;

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		ret = -EBUSY;
		if (cmd->flags & CCP_CMD_MAY_BACKLOG)
			list_add_tail(&cmd->entry, &ccp->backlog);
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);

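/* Illustrative sketch (not part of this file) of the queueing contract
 * documented above; my_done() and the prepared cmd are hypothetical:
 *
 *	static void my_done(void *data, int err)
 *	{
 *		if (err == -EINPROGRESS)
 *			return;		(cmd just advanced out of the backlog)
 *		(any other err is the final result of the operation)
 *	}
 *
 *	cmd->callback = my_done;
 *	cmd->flags |= CCP_CMD_MAY_BACKLOG;
 *	ret = ccp_enqueue_cmd(cmd);
 *	if (ret != -EINPROGRESS && ret != -EBUSY)
 *		(not queued; handle ret, e.g. -ENODEV or -EINVAL)
 */
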
static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}

static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);
	complete(&tdata->completion);
}

/**
 * ccp_cmd_queue_thread - kernel thread routine that manages a CCP queue
 *
 * @data: thread-specific data (a struct ccp_cmd_queue pointer)
 */
int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

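/* The function above is only the thread body; the driver's init path
 * creates one such kthread per command queue. A hedged sketch of that
 * setup (the real call site lives elsewhere in the driver; full error
 * handling elided):
 *
 *	cmd_q->kthread = kthread_run(ccp_cmd_queue_thread, cmd_q,
 *				     "%s-q%u", ccp->name, cmd_q->id);
 *	if (IS_ERR(cmd_q->kthread))
 *		(unwind and fail the probe)
 */
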
/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
	struct ccp_device *ccp;

	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;
	ccp->dev = dev;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->sb_mutex);
	ccp->sb_count = KSB_COUNT;
	ccp->sb_start = 0;

	ccp->ord = ccp_increment_unit_ordinal();
	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);

	return ccp;
}

int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/* Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}

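/* Note: each successful read above copies at most sizeof(u32) == 4 bytes
 * of TRNG output; the hwrng core is expected to call ->read repeatedly
 * to satisfy larger requests. (Observation from the code above, not a
 * documented guarantee of this driver.)
 */
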
#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}
#endif

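/* A hedged sketch of how a suspend path can use this predicate: flag the
 * device, nudge every queue kthread, then wait for all queues to park
 * (the real suspend handler lives elsewhere in the driver):
 *
 *	ccp->suspending = 1;
 *	for (i = 0; i < ccp->cmd_q_count; i++)
 *		wake_up_process(ccp->cmd_q[i].kthread);
 *	while (!ccp_queues_suspended(ccp))
 *		wait_event_interruptible(ccp->suspend_queue,
 *					 ccp_queues_suspended(ccp));
 */
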
static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
	int ret;

	ret = ccp_pci_init();
	if (ret)
		return ret;

	/* Don't leave the driver loaded if init failed */
	if (ccp_present() != 0) {
		ccp_pci_exit();
		return -ENODEV;
	}

	return 0;
#endif

#ifdef CONFIG_ARM64
	int ret;

	ret = ccp_platform_init();
	if (ret)
		return ret;

	/* Don't leave the driver loaded if init failed */
	if (ccp_present() != 0) {
		ccp_platform_exit();
		return -ENODEV;
	}

	return 0;
#endif

	return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
	ccp_pci_exit();
#endif

#ifdef CONFIG_ARM64
	ccp_platform_exit();
#endif
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);