Commit | Line | Data |
---|---|---|
63b94509 TL |
1 | /* |
2 | * AMD Cryptographic Coprocessor (CCP) driver | |
3 | * | |
553d2374 | 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. |
63b94509 TL |
5 | * |
6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | |
fba8855c | 7 | * Author: Gary R Hook <gary.hook@amd.com> |
63b94509 TL |
8 | * |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | */ | |
13 | ||
14 | #include <linux/module.h> | |
15 | #include <linux/kernel.h> | |
3d77565b | 16 | #include <linux/device.h> |
63b94509 TL |
17 | #include <linux/pci.h> |
18 | #include <linux/pci_ids.h> | |
3d77565b | 19 | #include <linux/dma-mapping.h> |
63b94509 TL |
20 | #include <linux/kthread.h> |
21 | #include <linux/sched.h> | |
22 | #include <linux/interrupt.h> | |
23 | #include <linux/spinlock.h> | |
24 | #include <linux/delay.h> | |
25 | #include <linux/ccp.h> | |
26 | ||
27 | #include "ccp-dev.h" | |
28 | ||
63b94509 TL |
/* Maximum number of MSI-X vectors this driver will request per device */
#define MSIX_VECTORS			2

/* One entry per enabled MSI-X vector */
struct ccp_msix {
	u32 vector;		/* IRQ number assigned by the PCI core */
	char name[16];		/* per-vector name handed to request_irq() */
};

/* PCI-bus-specific device state, hung off ccp_device->dev_specific */
struct ccp_pci {
	int msix_count;		/* vectors actually enabled; 0 => MSI or none */
	struct ccp_msix msix[MSIX_VECTORS];
};
40 | ||
41 | static int ccp_get_msix_irqs(struct ccp_device *ccp) | |
42 | { | |
43 | struct ccp_pci *ccp_pci = ccp->dev_specific; | |
44 | struct device *dev = ccp->dev; | |
c6c59bf2 | 45 | struct pci_dev *pdev = to_pci_dev(dev); |
63b94509 TL |
46 | struct msix_entry msix_entry[MSIX_VECTORS]; |
47 | unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1; | |
48 | int v, ret; | |
49 | ||
50 | for (v = 0; v < ARRAY_SIZE(msix_entry); v++) | |
51 | msix_entry[v].entry = v; | |
52 | ||
5347ee8e AG |
53 | ret = pci_enable_msix_range(pdev, msix_entry, 1, v); |
54 | if (ret < 0) | |
63b94509 TL |
55 | return ret; |
56 | ||
5347ee8e | 57 | ccp_pci->msix_count = ret; |
63b94509 TL |
58 | for (v = 0; v < ccp_pci->msix_count; v++) { |
59 | /* Set the interrupt names and request the irqs */ | |
553d2374 GH |
60 | snprintf(ccp_pci->msix[v].name, name_len, "%s-%u", |
61 | ccp->name, v); | |
63b94509 | 62 | ccp_pci->msix[v].vector = msix_entry[v].vector; |
ea0375af GH |
63 | ret = request_irq(ccp_pci->msix[v].vector, |
64 | ccp->vdata->perform->irqhandler, | |
63b94509 TL |
65 | 0, ccp_pci->msix[v].name, dev); |
66 | if (ret) { | |
67 | dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n", | |
68 | ret); | |
69 | goto e_irq; | |
70 | } | |
71 | } | |
72 | ||
73 | return 0; | |
74 | ||
75 | e_irq: | |
76 | while (v--) | |
77 | free_irq(ccp_pci->msix[v].vector, dev); | |
78 | ||
79 | pci_disable_msix(pdev); | |
80 | ||
81 | ccp_pci->msix_count = 0; | |
82 | ||
83 | return ret; | |
84 | } | |
85 | ||
86 | static int ccp_get_msi_irq(struct ccp_device *ccp) | |
87 | { | |
88 | struct device *dev = ccp->dev; | |
c6c59bf2 | 89 | struct pci_dev *pdev = to_pci_dev(dev); |
63b94509 TL |
90 | int ret; |
91 | ||
92 | ret = pci_enable_msi(pdev); | |
93 | if (ret) | |
94 | return ret; | |
95 | ||
3d77565b | 96 | ccp->irq = pdev->irq; |
ea0375af GH |
97 | ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0, |
98 | ccp->name, dev); | |
63b94509 TL |
99 | if (ret) { |
100 | dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); | |
101 | goto e_msi; | |
102 | } | |
103 | ||
104 | return 0; | |
105 | ||
106 | e_msi: | |
107 | pci_disable_msi(pdev); | |
108 | ||
109 | return ret; | |
110 | } | |
111 | ||
112 | static int ccp_get_irqs(struct ccp_device *ccp) | |
113 | { | |
114 | struct device *dev = ccp->dev; | |
115 | int ret; | |
116 | ||
117 | ret = ccp_get_msix_irqs(ccp); | |
118 | if (!ret) | |
119 | return 0; | |
120 | ||
121 | /* Couldn't get MSI-X vectors, try MSI */ | |
122 | dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret); | |
123 | ret = ccp_get_msi_irq(ccp); | |
124 | if (!ret) | |
125 | return 0; | |
126 | ||
127 | /* Couldn't get MSI interrupt */ | |
128 | dev_notice(dev, "could not enable MSI (%d)\n", ret); | |
129 | ||
130 | return ret; | |
131 | } | |
132 | ||
133 | static void ccp_free_irqs(struct ccp_device *ccp) | |
134 | { | |
135 | struct ccp_pci *ccp_pci = ccp->dev_specific; | |
136 | struct device *dev = ccp->dev; | |
c6c59bf2 | 137 | struct pci_dev *pdev = to_pci_dev(dev); |
63b94509 TL |
138 | |
139 | if (ccp_pci->msix_count) { | |
140 | while (ccp_pci->msix_count--) | |
141 | free_irq(ccp_pci->msix[ccp_pci->msix_count].vector, | |
142 | dev); | |
143 | pci_disable_msix(pdev); | |
4b394a23 | 144 | } else if (ccp->irq) { |
3d77565b | 145 | free_irq(ccp->irq, dev); |
63b94509 TL |
146 | pci_disable_msi(pdev); |
147 | } | |
4b394a23 | 148 | ccp->irq = 0; |
63b94509 TL |
149 | } |
150 | ||
151 | static int ccp_find_mmio_area(struct ccp_device *ccp) | |
152 | { | |
153 | struct device *dev = ccp->dev; | |
c6c59bf2 | 154 | struct pci_dev *pdev = to_pci_dev(dev); |
63b94509 TL |
155 | resource_size_t io_len; |
156 | unsigned long io_flags; | |
63b94509 | 157 | |
fba8855c GH |
158 | io_flags = pci_resource_flags(pdev, ccp->vdata->bar); |
159 | io_len = pci_resource_len(pdev, ccp->vdata->bar); | |
160 | if ((io_flags & IORESOURCE_MEM) && | |
161 | (io_len >= (ccp->vdata->offset + 0x800))) | |
162 | return ccp->vdata->bar; | |
63b94509 | 163 | |
63b94509 TL |
164 | return -EIO; |
165 | } | |
166 | ||
/*
 * ccp_pci_probe() - bind the driver to a matched CCP PCI device.
 *
 * Allocates the device structure, wires in the version-specific
 * operations from id->driver_data, claims and maps the MMIO region,
 * configures DMA masks, and hands off to the per-version init routine.
 * Resources are unwound in reverse acquisition order on failure.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ccp_device *ccp;
	struct ccp_pci *ccp_pci;
	struct device *dev = &pdev->dev;
	unsigned int bar;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(dev);
	if (!ccp)
		goto e_err;

	/* devm allocation: freed automatically when the device detaches */
	ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
	if (!ccp_pci)
		goto e_err;

	ccp->dev_specific = ccp_pci;
	/* Version-specific ops were stashed in the id table's driver_data */
	ccp->vdata = (struct ccp_vdata *)id->driver_data;
	if (!ccp->vdata || !ccp->vdata->version) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;

	ret = pci_request_regions(pdev, "ccp");
	if (ret) {
		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
		goto e_err;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pci_enable_device failed (%d)\n", ret);
		goto e_regions;
	}

	pci_set_master(pdev);

	/* Returns the BAR index, or a negative errno */
	ret = ccp_find_mmio_area(ccp);
	if (ret < 0)
		goto e_device;
	bar = ret;

	ret = -EIO;
	ccp->io_map = pci_iomap(pdev, bar, 0);
	if (!ccp->io_map) {
		dev_err(dev, "pci_iomap failed\n");
		goto e_device;
	}
	/* Register block starts at a version-specific offset into the BAR */
	ccp->io_regs = ccp->io_map + ccp->vdata->offset;

	/* Prefer a 48-bit DMA mask; fall back to 32-bit if unsupported */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
				ret);
			goto e_iomap;
		}
	}

	dev_set_drvdata(dev, ccp);

	/* Optional version-specific pre-init hook */
	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

	ret = ccp->vdata->perform->init(ccp);
	if (ret)
		goto e_iomap;

	dev_notice(dev, "enabled\n");

	return 0;

e_iomap:
	pci_iounmap(pdev, ccp->io_map);

e_device:
	pci_disable_device(pdev);

e_regions:
	pci_release_regions(pdev);

e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}
257 | ||
/*
 * ccp_pci_remove() - unbind the driver and tear down the device.
 *
 * Reverses ccp_pci_probe(): version-specific destroy, then unmap,
 * disable, and release the PCI resources.  A NULL drvdata means probe
 * never completed, so there is nothing to undo.
 */
static void ccp_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);

	if (!ccp)
		return;

	ccp->vdata->perform->destroy(ccp);

	pci_iounmap(pdev, ccp->io_map);

	pci_disable_device(pdev);

	pci_release_regions(pdev);

	dev_notice(dev, "disabled\n");
}
276 | ||
277 | #ifdef CONFIG_PM | |
/*
 * ccp_pci_suspend() - legacy PCI PM suspend hook.
 *
 * Sets the suspending flag under cmd_lock, wakes every command-queue
 * kthread so it can observe the flag, then blocks until all queues
 * report themselves suspended.  Always returns 0.
 */
static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done; the loop
	 * re-checks because wait_event_interruptible() can return early
	 * on a signal.
	 */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}
302 | ||
/*
 * ccp_pci_resume() - legacy PCI PM resume hook.
 *
 * Clears the suspending flag and each queue's suspended flag under
 * cmd_lock, then wakes every command-queue kthread to resume work.
 * Always returns 0.
 */
static int ccp_pci_resume(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}
324 | #endif | |
325 | ||
/* Supported AMD CCP devices; driver_data selects the per-version
 * ccp_vdata (v3, v5, or the "other" v5 variant) used by probe.
 */
static const struct pci_device_id ccp_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5 },
	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5other },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ccp_pci_table);
334 | ||
/* Driver registration record; the legacy suspend/resume callbacks are
 * only present when power management is configured.
 */
static struct pci_driver ccp_pci_driver = {
	.name = "ccp",
	.id_table = ccp_pci_table,
	.probe = ccp_pci_probe,
	.remove = ccp_pci_remove,
#ifdef CONFIG_PM
	.suspend = ccp_pci_suspend,
	.resume = ccp_pci_resume,
#endif
};
345 | ||
/* Register the PCI driver with the PCI core.  Non-static: presumably
 * called from the CCP module init path — confirm against ccp-dev.h.
 */
int ccp_pci_init(void)
{
	return pci_register_driver(&ccp_pci_driver);
}
350 | ||
/* Unregister the PCI driver; counterpart to ccp_pci_init(). */
void ccp_pci_exit(void)
{
	pci_unregister_driver(&ccp_pci_driver);
}