/*
 * File: portdrv_pci.c
 * Purpose: PCI Express Port Bus Driver
 *
 * Copyright (C) 2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/init.h>
#include <linux/pcieport_if.h>
#include <linux/aer.h>
#include <linux/dmi.h>
#include <linux/pci-aspm.h>

#include "portdrv.h"
#include "aer/aerdrv.h"

/*
 * Version Information
 */
#define DRIVER_VERSION "v1.0"
#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
#define DRIVER_DESC "PCIe Port Bus Driver"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

/* If this switch is set, PCIe port native services should not be enabled. */
bool pcie_ports_disabled;

/*
 * If this switch is set, ACPI _OSC will be used to determine whether or not to
 * enable PCIe port native services.
 */
bool pcie_ports_auto = true;

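/*
 * Parse the "pcie_ports=" command-line parameter:
 *   "compat" - disable native PCIe port services entirely,
 *   "native" - use native port services without asking the firmware,
 *   "auto"   - let ACPI _OSC decide whether to enable native port services.
 */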
static int __init pcie_port_setup(char *str)
{
        if (!strncmp(str, "compat", 6)) {
                pcie_ports_disabled = true;
        } else if (!strncmp(str, "native", 6)) {
                pcie_ports_disabled = false;
                pcie_ports_auto = false;
        } else if (!strncmp(str, "auto", 4)) {
                pcie_ports_disabled = false;
                pcie_ports_auto = true;
        }

        return 1;
}
__setup("pcie_ports=", pcie_port_setup);

/* global data */

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
        pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

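/* Re-enable the port and bus mastering after its config space has been restored. */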
static int pcie_portdrv_restore_config(struct pci_dev *dev)
{
        int retval;

        retval = pci_enable_device(dev);
        if (retval)
                return retval;
        pci_set_master(dev);
        return 0;
}

#ifdef CONFIG_PM
static int pcie_port_resume_noirq(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        /*
         * Some BIOSes forget to clear Root PME Status bits after system
         * wakeup, which breaks ACPI-based runtime wakeup on PCI Express.
         * Clear those bits now just in case (it should not hurt).
         */
        if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
                pcie_clear_root_pme_status(pdev);
        return 0;
}

#ifdef CONFIG_PM_RUNTIME
struct d3cold_info {
        bool no_d3cold;
        unsigned int d3cold_delay;
};

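/*
 * pci_walk_bus() callback: record the largest D3cold delay among the
 * subordinate devices and whether any of them forbids D3cold.
 */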
static int pci_dev_d3cold_info(struct pci_dev *pdev, void *data)
{
        struct d3cold_info *info = data;

        info->d3cold_delay = max_t(unsigned int, pdev->d3cold_delay,
                                   info->d3cold_delay);
        if (pdev->no_d3cold)
                info->no_d3cold = true;
        return 0;
}

static int pcie_port_runtime_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct d3cold_info d3cold_info = {
                .no_d3cold      = false,
                .d3cold_delay   = PCI_PM_D3_WAIT,
        };

        /*
         * If any subordinate device disables D3cold, we should not put
         * the port into D3cold.  The D3cold delay of the port should be
         * the maximum of those of all its subordinate devices.
         */
        pci_walk_bus(pdev->subordinate, pci_dev_d3cold_info, &d3cold_info);
        pdev->no_d3cold = d3cold_info.no_d3cold;
        pdev->d3cold_delay = d3cold_info.d3cold_delay;
        return 0;
}

static int pcie_port_runtime_resume(struct device *dev)
{
        return 0;
}

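/* pci_walk_bus() callback: note whether any subordinate device needs PME polling. */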
static int pci_dev_pme_poll(struct pci_dev *pdev, void *data)
{
        bool *pme_poll = data;

        if (pdev->pme_poll)
                *pme_poll = true;
        return 0;
}

static int pcie_port_runtime_idle(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        bool pme_poll = false;

        /*
         * If any subordinate device needs PME polling, we should keep
         * the port in D0, because we need the port in D0 to poll it.
         */
        pci_walk_bus(pdev->subordinate, pci_dev_pme_poll, &pme_poll);
        /* Delay for a short while to prevent too frequent suspend/resume */
        if (!pme_poll)
                pm_schedule_suspend(dev, 10);
        return -EBUSY;
}
#else
#define pcie_port_runtime_suspend       NULL
#define pcie_port_runtime_resume        NULL
#define pcie_port_runtime_idle          NULL
#endif

static const struct dev_pm_ops pcie_portdrv_pm_ops = {
        .suspend        = pcie_port_device_suspend,
        .resume         = pcie_port_device_resume,
        .freeze         = pcie_port_device_suspend,
        .thaw           = pcie_port_device_resume,
        .poweroff       = pcie_port_device_suspend,
        .restore        = pcie_port_device_resume,
        .resume_noirq   = pcie_port_resume_noirq,
        .runtime_suspend = pcie_port_runtime_suspend,
        .runtime_resume = pcie_port_runtime_resume,
        .runtime_idle   = pcie_port_runtime_idle,
};

#define PCIE_PORTDRV_PM_OPS     (&pcie_portdrv_pm_ops)

#else /* !PM */

#define PCIE_PORTDRV_PM_OPS     NULL
#endif /* !PM */

/*
 * pcie_portdrv_probe - Probe PCI-Express port devices
 * @dev: PCI-Express port device being probed
 *
 * If detected, invokes the pcie_port_device_register() method for
 * this port device.
 */
static int pcie_portdrv_probe(struct pci_dev *dev,
                              const struct pci_device_id *id)
{
        int status;

        if (!pci_is_pcie(dev) ||
            ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
             (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) &&
             (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
                return -ENODEV;

        if (!dev->irq && dev->pin) {
                dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
                         "check vendor BIOS\n", dev->vendor, dev->device);
        }
        status = pcie_port_device_register(dev);
        if (status)
                return status;

        pci_save_state(dev);
        /*
         * D3cold may not work properly on some PCIe ports, so disable
         * it by default.
         */
        dev->d3cold_allowed = false;
        return 0;
}

static void pcie_portdrv_remove(struct pci_dev *dev)
{
        pcie_port_device_remove(dev);
        pci_disable_device(dev);
}

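/*
 * device_for_each_child() callback: forward the error_detected notification
 * to the service driver bound to each port service device and merge the
 * returned recovery statuses.
 */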
static int error_detected_iter(struct device *device, void *data)
{
        struct pcie_device *pcie_device;
        struct pcie_port_service_driver *driver;
        struct aer_broadcast_data *result_data;
        pci_ers_result_t status;

        result_data = (struct aer_broadcast_data *) data;

        if (device->bus == &pcie_port_bus_type && device->driver) {
                driver = to_service_driver(device->driver);
                if (!driver ||
                    !driver->err_handler ||
                    !driver->err_handler->error_detected)
                        return 0;

                pcie_device = to_pcie_device(device);

                /* Forward error detected message to service drivers */
                status = driver->err_handler->error_detected(
                        pcie_device->port,
                        result_data->state);
                result_data->result =
                        merge_result(result_data->result, status);
        }

        return 0;
}

static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
                                        enum pci_channel_state error)
{
        struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER};

        /* get true return value from &data */
        device_for_each_child(&dev->dev, &data, error_detected_iter);
        return data.result;
}

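/*
 * device_for_each_child() callback: forward the mmio_enabled notification
 * to each port service driver and merge the returned recovery statuses.
 */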
static int mmio_enabled_iter(struct device *device, void *data)
{
        struct pcie_device *pcie_device;
        struct pcie_port_service_driver *driver;
        pci_ers_result_t status, *result;

        result = (pci_ers_result_t *) data;

        if (device->bus == &pcie_port_bus_type && device->driver) {
                driver = to_service_driver(device->driver);
                if (driver &&
                    driver->err_handler &&
                    driver->err_handler->mmio_enabled) {
                        pcie_device = to_pcie_device(device);

                        /* Forward error message to service drivers */
                        status = driver->err_handler->mmio_enabled(
                                pcie_device->port);
                        *result = merge_result(*result, status);
                }
        }

        return 0;
}

static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
{
        pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;

        /* get true return value from &status */
        device_for_each_child(&dev->dev, &status, mmio_enabled_iter);
        return status;
}

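/*
 * device_for_each_child() callback: forward the slot_reset notification
 * to each port service driver and merge the returned recovery statuses.
 */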
static int slot_reset_iter(struct device *device, void *data)
{
        struct pcie_device *pcie_device;
        struct pcie_port_service_driver *driver;
        pci_ers_result_t status, *result;

        result = (pci_ers_result_t *) data;

        if (device->bus == &pcie_port_bus_type && device->driver) {
                driver = to_service_driver(device->driver);
                if (driver &&
                    driver->err_handler &&
                    driver->err_handler->slot_reset) {
                        pcie_device = to_pcie_device(device);

                        /* Forward error message to service drivers */
                        status = driver->err_handler->slot_reset(
                                pcie_device->port);
                        *result = merge_result(*result, status);
                }
        }

        return 0;
}

static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
{
        pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;

        /* If fatal, restore cfg space for possible link reset at upstream */
        if (dev->error_state == pci_channel_io_frozen) {
                dev->state_saved = true;
                pci_restore_state(dev);
                pcie_portdrv_restore_config(dev);
                pci_enable_pcie_error_reporting(dev);
        }

        /* get true return value from &status */
        device_for_each_child(&dev->dev, &status, slot_reset_iter);
        return status;
}

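/*
 * device_for_each_child() callback: tell each port service driver that the
 * error has been handled and normal operation may resume.
 */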
static int resume_iter(struct device *device, void *data)
{
        struct pcie_device *pcie_device;
        struct pcie_port_service_driver *driver;

        if (device->bus == &pcie_port_bus_type && device->driver) {
                driver = to_service_driver(device->driver);
                if (driver &&
                    driver->err_handler &&
                    driver->err_handler->resume) {
                        pcie_device = to_pcie_device(device);

                        /* Forward error message to service drivers */
                        driver->err_handler->resume(pcie_device->port);
                }
        }

        return 0;
}

static void pcie_portdrv_err_resume(struct pci_dev *dev)
{
        device_for_each_child(&dev->dev, NULL, resume_iter);
}

/*
 * LINUX Device Driver Model
 */
static const struct pci_device_id port_pci_ids[] = { {
        /* handle any PCI-Express port */
        PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0),
        }, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, port_pci_ids);

static const struct pci_error_handlers pcie_portdrv_err_handler = {
        .error_detected = pcie_portdrv_error_detected,
        .mmio_enabled = pcie_portdrv_mmio_enabled,
        .slot_reset = pcie_portdrv_slot_reset,
        .resume = pcie_portdrv_err_resume,
};

static struct pci_driver pcie_portdriver = {
        .name           = "pcieport",
        .id_table       = &port_pci_ids[0],

        .probe          = pcie_portdrv_probe,
        .remove         = pcie_portdrv_remove,

        .err_handler    = &pcie_portdrv_err_handler,

        .driver.pm      = PCIE_PORTDRV_PM_OPS,
};

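/* DMI quirk callback: disable MSI for PCIe PME signaling on the matched system. */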
static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
{
        pr_notice("%s detected: will not use MSI for PCIe PME signaling\n",
                  d->ident);
        pcie_pme_disable_msi();
        return 0;
}

static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
        /*
         * Boxes that should not use MSI for PCIe PME signaling.
         */
        {
                .callback = dmi_pcie_pme_disable_msi,
                .ident = "MSI Wind U-100",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR,
                                  "MICRO-STAR INTERNATIONAL CO., LTD"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "U-100"),
                },
        },
        {}
};

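/*
 * Module entry point: register the PCIe port service bus and the port
 * driver.  When native port services are disabled ("pcie_ports=compat"),
 * the port driver is still registered, but the service bus is not.
 */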
static int __init pcie_portdrv_init(void)
{
        int retval;

        if (pcie_ports_disabled)
                return pci_register_driver(&pcie_portdriver);

        dmi_check_system(pcie_portdrv_dmi_table);

        retval = pcie_port_bus_register();
        if (retval) {
                printk(KERN_WARNING "PCIE: bus_register error: %d\n", retval);
                goto out;
        }
        retval = pci_register_driver(&pcie_portdriver);
        if (retval)
                pcie_port_bus_unregister();
 out:
        return retval;
}

module_init(pcie_portdrv_init);