/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

/* Ever-increasing value to produce unique unit numbers */
static atomic_t ccp_unit_ordinal;
static unsigned int ccp_increment_unit_ordinal(void)
{
	return atomic_inc_return(&ccp_unit_ordinal);
}

/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}
90 | ||
ea0375af GH |
91 | /** |
92 | * ccp_del_device - remove a CCP device from the list | |
93 | * | |
94 | * @ccp: ccp_device struct pointer | |
95 | * | |
96 | * Remove this unit from the list of devices. If the next device | |
553d2374 GH |
97 | * up for use is this one, adjust the pointer. If this is the last |
98 | * device, NULL the pointer. | |
99 | */ | |
ea0375af | 100 | void ccp_del_device(struct ccp_device *ccp) |
63b94509 | 101 | { |
553d2374 GH |
102 | unsigned long flags; |
103 | ||
104 | write_lock_irqsave(&ccp_unit_lock, flags); | |
105 | if (ccp_rr == ccp) { | |
106 | /* ccp_unit_lock is read/write; any read access | |
107 | * will be suspended while we make changes to the | |
108 | * list and RR pointer. | |
109 | */ | |
110 | if (list_is_last(&ccp_rr->entry, &ccp_units)) | |
111 | ccp_rr = list_first_entry(&ccp_units, struct ccp_device, | |
112 | entry); | |
113 | else | |
114 | ccp_rr = list_next_entry(ccp_rr, entry); | |
115 | } | |
116 | list_del(&ccp->entry); | |
117 | if (list_empty(&ccp_units)) | |
118 | ccp_rr = NULL; | |
119 | write_unlock_irqrestore(&ccp_unit_lock, flags); | |
120 | } | |
121 | ||
084935b2 GH |
122 | |
123 | ||
124 | int ccp_register_rng(struct ccp_device *ccp) | |
125 | { | |
126 | int ret = 0; | |
127 | ||
128 | dev_dbg(ccp->dev, "Registering RNG...\n"); | |
129 | /* Register an RNG */ | |
130 | ccp->hwrng.name = ccp->rngname; | |
131 | ccp->hwrng.read = ccp_trng_read; | |
132 | ret = hwrng_register(&ccp->hwrng); | |
133 | if (ret) | |
134 | dev_err(ccp->dev, "error registering hwrng (%d)\n", ret); | |
135 | ||
136 | return ret; | |
137 | } | |
138 | ||
139 | void ccp_unregister_rng(struct ccp_device *ccp) | |
140 | { | |
141 | if (ccp->hwrng.name) | |
142 | hwrng_unregister(&ccp->hwrng); | |
143 | } | |
144 | ||
553d2374 GH |
145 | static struct ccp_device *ccp_get_device(void) |
146 | { | |
147 | unsigned long flags; | |
148 | struct ccp_device *dp = NULL; | |
149 | ||
150 | /* We round-robin through the unit list. | |
151 | * The (ccp_rr) pointer refers to the next unit to use. | |
152 | */ | |
153 | read_lock_irqsave(&ccp_unit_lock, flags); | |
154 | if (!list_empty(&ccp_units)) { | |
03a6f290 | 155 | spin_lock(&ccp_rr_lock); |
553d2374 GH |
156 | dp = ccp_rr; |
157 | if (list_is_last(&ccp_rr->entry, &ccp_units)) | |
158 | ccp_rr = list_first_entry(&ccp_units, struct ccp_device, | |
159 | entry); | |
160 | else | |
161 | ccp_rr = list_next_entry(ccp_rr, entry); | |
03a6f290 | 162 | spin_unlock(&ccp_rr_lock); |
553d2374 GH |
163 | } |
164 | read_unlock_irqrestore(&ccp_unit_lock, flags); | |
165 | ||
166 | return dp; | |
63b94509 TL |
167 | } |
168 | ||
c9f21cb6 TL |
169 | /** |
170 | * ccp_present - check if a CCP device is present | |
171 | * | |
172 | * Returns zero if a CCP device is present, -ENODEV otherwise. | |
173 | */ | |
174 | int ccp_present(void) | |
175 | { | |
553d2374 GH |
176 | unsigned long flags; |
177 | int ret; | |
c9f21cb6 | 178 | |
553d2374 GH |
179 | read_lock_irqsave(&ccp_unit_lock, flags); |
180 | ret = list_empty(&ccp_units); | |
181 | read_unlock_irqrestore(&ccp_unit_lock, flags); | |
182 | ||
183 | return ret ? -ENODEV : 0; | |
c9f21cb6 TL |
184 | } |
185 | EXPORT_SYMBOL_GPL(ccp_present); | |
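
/*
 * Example (illustrative sketch, not part of this file): a client
 * module, such as a crypto algorithm provider built on this driver,
 * can call ccp_present() at init time and decline to load when no
 * CCP unit is available. The helper name below is hypothetical.
 *
 *	static int __init my_ccp_client_init(void)
 *	{
 *		int ret;
 *
 *		ret = ccp_present();
 *		if (ret)
 *			return ret;	// no CCP: -ENODEV
 *
 *		return my_register_algs();	// hypothetical helper
 *	}
 */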
186 | ||
c7019c4d GH |
187 | /** |
188 | * ccp_version - get the version of the CCP device | |
189 | * | |
190 | * Returns the version from the first unit on the list; | |
191 | * otherwise a zero if no CCP device is present | |
192 | */ | |
193 | unsigned int ccp_version(void) | |
194 | { | |
195 | struct ccp_device *dp; | |
196 | unsigned long flags; | |
197 | int ret = 0; | |
198 | ||
199 | read_lock_irqsave(&ccp_unit_lock, flags); | |
200 | if (!list_empty(&ccp_units)) { | |
201 | dp = list_first_entry(&ccp_units, struct ccp_device, entry); | |
202 | ret = dp->vdata->version; | |
203 | } | |
204 | read_unlock_irqrestore(&ccp_unit_lock, flags); | |
205 | ||
206 | return ret; | |
207 | } | |
208 | EXPORT_SYMBOL_GPL(ccp_version); | |
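
/*
 * Example (illustrative sketch): a caller can gate version-specific
 * behavior on ccp_version(). The CCP_VERSION() macro is assumed to
 * come from <linux/ccp.h>; the v5 requirement shown is only for
 * illustration.
 *
 *	if (ccp_version() < CCP_VERSION(5, 0))
 *		return -ENODEV;	// feature requires a v5 CCP
 */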
209 | ||
63b94509 TL |
210 | /** |
211 | * ccp_enqueue_cmd - queue an operation for processing by the CCP | |
212 | * | |
213 | * @cmd: ccp_cmd struct to be processed | |
214 | * | |
215 | * Queue a cmd to be processed by the CCP. If queueing the cmd | |
216 | * would exceed the defined length of the cmd queue the cmd will | |
217 | * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will | |
218 | * result in a return code of -EBUSY. | |
219 | * | |
220 | * The callback routine specified in the ccp_cmd struct will be | |
221 | * called to notify the caller of completion (if the cmd was not | |
222 | * backlogged) or advancement out of the backlog. If the cmd has | |
223 | * advanced out of the backlog the "err" value of the callback | |
224 | * will be -EINPROGRESS. Any other "err" value during callback is | |
225 | * the result of the operation. | |
226 | * | |
227 | * The cmd has been successfully queued if: | |
228 | * the return code is -EINPROGRESS or | |
229 | * the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set | |
230 | */ | |
231 | int ccp_enqueue_cmd(struct ccp_cmd *cmd) | |
232 | { | |
233 | struct ccp_device *ccp = ccp_get_device(); | |
234 | unsigned long flags; | |
235 | unsigned int i; | |
236 | int ret; | |
237 | ||
238 | if (!ccp) | |
239 | return -ENODEV; | |
240 | ||
241 | /* Caller must supply a callback routine */ | |
242 | if (!cmd->callback) | |
243 | return -EINVAL; | |
244 | ||
245 | cmd->ccp = ccp; | |
246 | ||
247 | spin_lock_irqsave(&ccp->cmd_lock, flags); | |
248 | ||
249 | i = ccp->cmd_q_count; | |
250 | ||
251 | if (ccp->cmd_count >= MAX_CMD_QLEN) { | |
252 | ret = -EBUSY; | |
253 | if (cmd->flags & CCP_CMD_MAY_BACKLOG) | |
254 | list_add_tail(&cmd->entry, &ccp->backlog); | |
255 | } else { | |
256 | ret = -EINPROGRESS; | |
257 | ccp->cmd_count++; | |
258 | list_add_tail(&cmd->entry, &ccp->cmd); | |
259 | ||
260 | /* Find an idle queue */ | |
261 | if (!ccp->suspending) { | |
262 | for (i = 0; i < ccp->cmd_q_count; i++) { | |
263 | if (ccp->cmd_q[i].active) | |
264 | continue; | |
265 | ||
266 | break; | |
267 | } | |
268 | } | |
269 | } | |
270 | ||
271 | spin_unlock_irqrestore(&ccp->cmd_lock, flags); | |
272 | ||
273 | /* If we found an idle queue, wake it up */ | |
274 | if (i < ccp->cmd_q_count) | |
275 | wake_up_process(ccp->cmd_q[i].kthread); | |
276 | ||
277 | return ret; | |
278 | } | |
279 | EXPORT_SYMBOL_GPL(ccp_enqueue_cmd); | |
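
/*
 * Example (illustrative sketch): a synchronous caller can pair
 * ccp_enqueue_cmd() with a completion signaled from its callback.
 * The context struct and setup below are hypothetical, not part of
 * this driver's API. Because CCP_CMD_MAY_BACKLOG is set, an -EBUSY
 * return still means the cmd was queued.
 *
 *	struct my_ctx {
 *		struct completion done;
 *		int err;
 *	};
 *
 *	static void my_callback(void *data, int err)
 *	{
 *		struct my_ctx *ctx = data;
 *
 *		if (err == -EINPROGRESS)	// advanced out of backlog
 *			return;
 *		ctx->err = err;
 *		complete(&ctx->done);
 *	}
 *
 *	// ... fill in a struct ccp_cmd (cmd.callback = my_callback,
 *	// cmd.data = &ctx, cmd.flags |= CCP_CMD_MAY_BACKLOG), then:
 *	ret = ccp_enqueue_cmd(&cmd);
 *	if (ret == -EINPROGRESS || ret == -EBUSY)
 *		wait_for_completion(&ctx.done);	// cmd was queued
 *	else if (ret)
 *		return ret;			// not queued
 */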
280 | ||
281 | static void ccp_do_cmd_backlog(struct work_struct *work) | |
282 | { | |
283 | struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work); | |
284 | struct ccp_device *ccp = cmd->ccp; | |
285 | unsigned long flags; | |
286 | unsigned int i; | |
287 | ||
288 | cmd->callback(cmd->data, -EINPROGRESS); | |
289 | ||
290 | spin_lock_irqsave(&ccp->cmd_lock, flags); | |
291 | ||
292 | ccp->cmd_count++; | |
293 | list_add_tail(&cmd->entry, &ccp->cmd); | |
294 | ||
295 | /* Find an idle queue */ | |
296 | for (i = 0; i < ccp->cmd_q_count; i++) { | |
297 | if (ccp->cmd_q[i].active) | |
298 | continue; | |
299 | ||
300 | break; | |
301 | } | |
302 | ||
303 | spin_unlock_irqrestore(&ccp->cmd_lock, flags); | |
304 | ||
305 | /* If we found an idle queue, wake it up */ | |
306 | if (i < ccp->cmd_q_count) | |
307 | wake_up_process(ccp->cmd_q[i].kthread); | |
308 | } | |
309 | ||
310 | static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q) | |
311 | { | |
312 | struct ccp_device *ccp = cmd_q->ccp; | |
313 | struct ccp_cmd *cmd = NULL; | |
314 | struct ccp_cmd *backlog = NULL; | |
315 | unsigned long flags; | |
316 | ||
317 | spin_lock_irqsave(&ccp->cmd_lock, flags); | |
318 | ||
319 | cmd_q->active = 0; | |
320 | ||
321 | if (ccp->suspending) { | |
322 | cmd_q->suspended = 1; | |
323 | ||
324 | spin_unlock_irqrestore(&ccp->cmd_lock, flags); | |
325 | wake_up_interruptible(&ccp->suspend_queue); | |
326 | ||
327 | return NULL; | |
328 | } | |
329 | ||
330 | if (ccp->cmd_count) { | |
331 | cmd_q->active = 1; | |
332 | ||
333 | cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); | |
334 | list_del(&cmd->entry); | |
335 | ||
336 | ccp->cmd_count--; | |
337 | } | |
338 | ||
339 | if (!list_empty(&ccp->backlog)) { | |
340 | backlog = list_first_entry(&ccp->backlog, struct ccp_cmd, | |
341 | entry); | |
342 | list_del(&backlog->entry); | |
343 | } | |
344 | ||
345 | spin_unlock_irqrestore(&ccp->cmd_lock, flags); | |
346 | ||
347 | if (backlog) { | |
348 | INIT_WORK(&backlog->work, ccp_do_cmd_backlog); | |
349 | schedule_work(&backlog->work); | |
350 | } | |
351 | ||
352 | return cmd; | |
353 | } | |
354 | ||
530abd89 | 355 | static void ccp_do_cmd_complete(unsigned long data) |
63b94509 | 356 | { |
530abd89 TL |
357 | struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data; |
358 | struct ccp_cmd *cmd = tdata->cmd; | |
63b94509 TL |
359 | |
360 | cmd->callback(cmd->data, cmd->ret); | |
530abd89 | 361 | complete(&tdata->completion); |
63b94509 TL |
362 | } |
363 | ||
ea0375af GH |
364 | /** |
365 | * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue | |
366 | * | |
367 | * @data: thread-specific data | |
368 | */ | |
369 | int ccp_cmd_queue_thread(void *data) | |
63b94509 TL |
370 | { |
371 | struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data; | |
372 | struct ccp_cmd *cmd; | |
530abd89 TL |
373 | struct ccp_tasklet_data tdata; |
374 | struct tasklet_struct tasklet; | |
375 | ||
376 | tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata); | |
63b94509 TL |
377 | |
378 | set_current_state(TASK_INTERRUPTIBLE); | |
379 | while (!kthread_should_stop()) { | |
380 | schedule(); | |
381 | ||
382 | set_current_state(TASK_INTERRUPTIBLE); | |
383 | ||
384 | cmd = ccp_dequeue_cmd(cmd_q); | |
385 | if (!cmd) | |
386 | continue; | |
387 | ||
388 | __set_current_state(TASK_RUNNING); | |
389 | ||
390 | /* Execute the command */ | |
391 | cmd->ret = ccp_run_cmd(cmd_q, cmd); | |
392 | ||
393 | /* Schedule the completion callback */ | |
530abd89 TL |
394 | tdata.cmd = cmd; |
395 | init_completion(&tdata.completion); | |
396 | tasklet_schedule(&tasklet); | |
397 | wait_for_completion(&tdata.completion); | |
63b94509 TL |
398 | } |
399 | ||
400 | __set_current_state(TASK_RUNNING); | |
401 | ||
402 | return 0; | |
403 | } | |
404 | ||
/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
	struct ccp_device *ccp;

	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;
	ccp->dev = dev;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->sb_mutex);
	ccp->sb_count = KSB_COUNT;
	ccp->sb_start = 0;

	ccp->ord = ccp_increment_unit_ordinal();
	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);

	return ccp;
}

int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/* Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}

#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}
#endif
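
/*
 * Example (illustrative sketch, locking elided): a suspend handler in
 * the bus glue can set ccp->suspending, wake every queue kthread so
 * it parks itself in ccp_dequeue_cmd(), and then use
 * ccp_queues_suspended() as the wait condition.
 *
 *	ccp->suspending = 1;
 *	// wake all queue kthreads so they notice the flag
 *	for (i = 0; i < ccp->cmd_q_count; i++)
 *		wake_up_process(ccp->cmd_q[i].kthread);
 *	// wait until every queue has marked itself suspended
 *	while (!ccp_queues_suspended(ccp))
 *		wait_event_interruptible(ccp->suspend_queue,
 *					 ccp_queues_suspended(ccp));
 */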
481 | ||
63b94509 TL |
482 | static int __init ccp_mod_init(void) |
483 | { | |
c4f4b325 | 484 | #ifdef CONFIG_X86 |
db34cf91 | 485 | int ret; |
63b94509 | 486 | |
3f19ce20 GH |
487 | ret = ccp_pci_init(); |
488 | if (ret) | |
489 | return ret; | |
db34cf91 | 490 | |
3f19ce20 | 491 | /* Don't leave the driver loaded if init failed */ |
553d2374 | 492 | if (ccp_present() != 0) { |
3f19ce20 GH |
493 | ccp_pci_exit(); |
494 | return -ENODEV; | |
d1dd206c | 495 | } |
3f19ce20 GH |
496 | |
497 | return 0; | |
c4f4b325 TL |
498 | #endif |
499 | ||
500 | #ifdef CONFIG_ARM64 | |
501 | int ret; | |
502 | ||
503 | ret = ccp_platform_init(); | |
504 | if (ret) | |
505 | return ret; | |
506 | ||
507 | /* Don't leave the driver loaded if init failed */ | |
553d2374 | 508 | if (ccp_present() != 0) { |
c4f4b325 TL |
509 | ccp_platform_exit(); |
510 | return -ENODEV; | |
511 | } | |
512 | ||
513 | return 0; | |
514 | #endif | |
63b94509 TL |
515 | |
516 | return -ENODEV; | |
517 | } | |
518 | ||
519 | static void __exit ccp_mod_exit(void) | |
520 | { | |
c4f4b325 | 521 | #ifdef CONFIG_X86 |
3f19ce20 | 522 | ccp_pci_exit(); |
c4f4b325 TL |
523 | #endif |
524 | ||
525 | #ifdef CONFIG_ARM64 | |
526 | ccp_platform_exit(); | |
527 | #endif | |
63b94509 TL |
528 | } |
529 | ||
530 | module_init(ccp_mod_init); | |
531 | module_exit(ccp_mod_exit); |