arch/powerpc/kernel/eeh_driver.c
/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/rtas.h>

/**
 * eeh_pcid_name - Retrieve name of PCI device driver
 * @pdev: PCI device
 *
 * This routine retrieves the name of the driver bound to the given
 * PCI device, or an empty string if no driver is bound.
 */
static inline const char *eeh_pcid_name(struct pci_dev *pdev)
{
        if (pdev && pdev->dev.driver)
                return pdev->dev.driver->name;
        return "";
}

/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * The function retrieves the PCI device driver for the indicated
 * PCI device. It also takes a reference on the driver module to
 * prevent it from being unloaded on the fly, which would otherwise
 * crash the kernel.
 */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
        if (!pdev || !pdev->driver)
                return NULL;

        if (!try_module_get(pdev->driver->driver.owner))
                return NULL;

        return pdev->driver;
}

/**
 * eeh_pcid_put - Drop the reference on the PCI device driver
 * @pdev: PCI device
 *
 * The function drops the module reference taken by eeh_pcid_get()
 * on the driver of the indicated PCI device.
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
        if (!pdev || !pdev->driver)
                return;

        module_put(pdev->driver->driver.owner);
}
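
#if 0
/*
 * Illustrative sketch, not built: the intended get/use/put pairing.
 * Hold the module reference while touching the driver so it cannot
 * be unloaded in between. example_log_driver() is hypothetical.
 */
static void example_log_driver(struct pci_dev *pdev)
{
        struct pci_driver *driver = eeh_pcid_get(pdev);

        if (!driver)
                return;
        pr_info("EEH: %s bound to driver %s\n",
                pci_name(pdev), eeh_pcid_name(pdev));
        eeh_pcid_put(pdev);
}
#endif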

#if 0
static void print_device_node_tree(struct pci_dn *pdn, int dent)
{
        int i;
        struct device_node *pc;

        if (!pdn)
                return;
        for (i = 0; i < dent; i++)
                printk(" ");
        printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
               pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
               pdn->eeh_pe_config_addr, pdn->node->full_name);
        dent += 3;
        pc = pdn->node->child;
        while (pc) {
                print_device_node_tree(PCI_DN(pc), dent);
                pc = pc->sibling;
        }
}
#endif

/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called when reporting a temporary or permanent
 * error to a PCI device, to disable that device's interrupt. If the
 * device uses MSI or MSI-X, no real work is needed: EEH freezes DMA
 * for devices hit by EEH errors, which covers MSI and MSI-X delivery.
 */
static void eeh_disable_irq(struct pci_dev *dev)
{
        struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

        /* Don't disable MSI and MSI-X interrupts. They are
         * effectively disabled by the DMA Stopped state
         * when an EEH error occurs.
         */
        if (dev->msi_enabled || dev->msix_enabled)
                return;

        if (!irq_has_action(dev->irq))
                return;

        edev->mode |= EEH_DEV_IRQ_DISABLED;
        disable_irq_nosync(dev->irq);
}

/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called to re-enable the interrupt once the
 * failed device is ready to be resumed.
 */
static void eeh_enable_irq(struct pci_dev *dev)
{
        struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

        if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
                edev->mode &= ~EEH_DEV_IRQ_DISABLED;
                /*
                 * FIXME !!!!!
                 *
                 * This is just ass backwards. This maze has
                 * unbalanced irq_enable/disable calls. So instead of
                 * finding the root cause it works around the warning
                 * in the irq_enable code by conditionally calling
                 * into it.
                 *
                 * That's just wrong. The warning in the core code is
                 * there to tell people to fix their asymmetries in
                 * their own code, not by abusing the core information
                 * to avoid it.
                 *
                 * I so wish that the asymmetry would be the other way
                 * round and a few more irq_disable calls render that
                 * shit unusable forever.
                 *
                 * tglx
                 */
                if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
                        enable_irq(dev->irq);
        }
}

static bool eeh_dev_removed(struct eeh_dev *edev)
{
        /* EEH device removed ? */
        if (!edev || (edev->mode & EEH_DEV_REMOVED))
                return true;

        return false;
}

static void *eeh_dev_save_state(void *data, void *userdata)
{
        struct eeh_dev *edev = data;
        struct pci_dev *pdev;

        if (!edev)
                return NULL;

        pdev = eeh_dev_to_pci_dev(edev);
        if (!pdev)
                return NULL;

        pci_save_state(pdev);
        return NULL;
}

/**
 * eeh_report_error - Report pci error to each device driver
 * @data: eeh device
 * @userdata: return value
 *
 * Report an EEH error to each device driver, collect up and
 * merge the device driver responses. Cumulative response
 * passed back in "userdata".
 */
static void *eeh_report_error(void *data, void *userdata)
{
        struct eeh_dev *edev = (struct eeh_dev *)data;
        struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
        enum pci_ers_result rc, *res = userdata;
        struct pci_driver *driver;

        if (!dev || eeh_dev_removed(edev))
                return NULL;
        dev->error_state = pci_channel_io_frozen;

        driver = eeh_pcid_get(dev);
        if (!driver) return NULL;

        eeh_disable_irq(dev);

        if (!driver->err_handler ||
            !driver->err_handler->error_detected) {
                eeh_pcid_put(dev);
                return NULL;
        }

        rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);

        /* A driver that needs a reset trumps all others */
        if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
        if (*res == PCI_ERS_RESULT_NONE) *res = rc;

        eeh_pcid_put(dev);
        return NULL;
}
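
#if 0
/*
 * Illustrative sketch, not built: the driver-side half of this
 * protocol. A PCI driver opts into recovery by filling in a
 * struct pci_error_handlers; the eeh_report_*() walkers in this
 * file invoke those callbacks. All "example_*" names below are
 * hypothetical.
 */
static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
                                               enum pci_channel_state state)
{
        /* Quiesce I/O; the device is frozen at this point. */
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
{
        /* Reinitialize the device after the PE reset. */
        return PCI_ERS_RESULT_RECOVERED;
}

static void example_resume(struct pci_dev *pdev)
{
        /* Restart normal I/O. */
}

static const struct pci_error_handlers example_err_handler = {
        .error_detected = example_error_detected,
        .slot_reset     = example_slot_reset,
        .resume         = example_resume,
};

static struct pci_driver example_driver = {
        .name           = "example",
        .err_handler    = &example_err_handler,
        /* .id_table, .probe, .remove elided */
};
#endif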

/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @data: eeh device
 * @userdata: return value
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled. Collects up and merges the device driver responses.
 * Cumulative response passed back in "userdata".
 */
static void *eeh_report_mmio_enabled(void *data, void *userdata)
{
        struct eeh_dev *edev = (struct eeh_dev *)data;
        struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
        enum pci_ers_result rc, *res = userdata;
        struct pci_driver *driver;

        if (!dev || eeh_dev_removed(edev))
                return NULL;

        driver = eeh_pcid_get(dev);
        if (!driver) return NULL;

        if (!driver->err_handler ||
            !driver->err_handler->mmio_enabled ||
            (edev->mode & EEH_DEV_NO_HANDLER)) {
                eeh_pcid_put(dev);
                return NULL;
        }

        rc = driver->err_handler->mmio_enabled(dev);

        /* A driver that needs a reset trumps all others */
        if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
        if (*res == PCI_ERS_RESULT_NONE) *res = rc;

        eeh_pcid_put(dev);
        return NULL;
}

/**
 * eeh_report_reset - Tell device that slot has been reset
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called when EEH resets a particular PCI
 * device, so that the associated device driver can take whatever
 * actions it needs, typically reinitializing the device, to make
 * the device work again after recovery.
 */
static void *eeh_report_reset(void *data, void *userdata)
{
        struct eeh_dev *edev = (struct eeh_dev *)data;
        struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
        enum pci_ers_result rc, *res = userdata;
        struct pci_driver *driver;

        if (!dev || eeh_dev_removed(edev))
                return NULL;
        dev->error_state = pci_channel_io_normal;

        driver = eeh_pcid_get(dev);
        if (!driver) return NULL;

        eeh_enable_irq(dev);

        if (!driver->err_handler ||
            !driver->err_handler->slot_reset ||
            (edev->mode & EEH_DEV_NO_HANDLER)) {
                eeh_pcid_put(dev);
                return NULL;
        }

        rc = driver->err_handler->slot_reset(dev);
        if ((*res == PCI_ERS_RESULT_NONE) ||
            (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
        if (*res == PCI_ERS_RESULT_DISCONNECT &&
            rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;

        eeh_pcid_put(dev);
        return NULL;
}
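
/*
 * A note on result merging in the walkers above, added for clarity:
 * the cumulative result starts as PCI_ERS_RESULT_NONE. In
 * eeh_report_error() and eeh_report_mmio_enabled(), a NEED_RESET
 * answer from any single driver trumps the rest, while a NONE
 * cumulative value simply adopts the latest driver's answer. After
 * a reset, eeh_report_reset() also lets NEED_RESET override an
 * earlier DISCONNECT so another reset attempt can be made.
 */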

static void *eeh_dev_restore_state(void *data, void *userdata)
{
        struct eeh_dev *edev = data;
        struct pci_dev *pdev;

        if (!edev)
                return NULL;

        pdev = eeh_dev_to_pci_dev(edev);
        if (!pdev)
                return NULL;

        pci_restore_state(pdev);
        return NULL;
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called to notify the device driver that it
 * can resume, so that the driver can perform whatever initialization
 * is needed to make the recovered device work again.
 */
static void *eeh_report_resume(void *data, void *userdata)
{
        struct eeh_dev *edev = (struct eeh_dev *)data;
        struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
        struct pci_driver *driver;

        if (!dev || eeh_dev_removed(edev))
                return NULL;
        dev->error_state = pci_channel_io_normal;

        driver = eeh_pcid_get(dev);
        if (!driver) return NULL;

        eeh_enable_irq(dev);

        if (!driver->err_handler ||
            !driver->err_handler->resume ||
            (edev->mode & EEH_DEV_NO_HANDLER)) {
                edev->mode &= ~EEH_DEV_NO_HANDLER;
                eeh_pcid_put(dev);
                return NULL;
        }

        driver->err_handler->resume(dev);

        eeh_pcid_put(dev);
        return NULL;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @data: eeh device
 * @userdata: return value
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static void *eeh_report_failure(void *data, void *userdata)
{
        struct eeh_dev *edev = (struct eeh_dev *)data;
        struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
        struct pci_driver *driver;

        if (!dev || eeh_dev_removed(edev))
                return NULL;
        dev->error_state = pci_channel_io_perm_failure;

        driver = eeh_pcid_get(dev);
        if (!driver) return NULL;

        eeh_disable_irq(dev);

        if (!driver->err_handler ||
            !driver->err_handler->error_detected) {
                eeh_pcid_put(dev);
                return NULL;
        }

        driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);

        eeh_pcid_put(dev);
        return NULL;
}

static void *eeh_rmv_device(void *data, void *userdata)
{
        struct pci_driver *driver;
        struct eeh_dev *edev = (struct eeh_dev *)data;
        struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
        int *removed = (int *)userdata;

        /*
         * Strictly speaking, we should remove the PCI bridges as
         * well. However, that brings lots of complexity, in
         * particular because some of the devices under a bridge
         * might support EEH. So for simplicity we only care about
         * plain PCI devices here.
         */
        if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
                return NULL;

        /*
         * We rely on the count-based pcibios_release_device() to
         * detach permanently offlined PEs. Unfortunately, that's
         * not reliable enough. We might still have permanently
         * offlined PEs attached, but we needn't take care of them
         * or their child devices.
         */
        if (eeh_dev_removed(edev))
                return NULL;

        driver = eeh_pcid_get(dev);
        if (driver) {
                eeh_pcid_put(dev);
                if (driver->err_handler)
                        return NULL;
        }

        /* Remove it from PCI subsystem */
        pr_debug("EEH: Removing %s without EEH sensitive driver\n",
                 pci_name(dev));
        edev->bus = dev->bus;
        edev->mode |= EEH_DEV_DISCONNECTED;
        (*removed)++;

        pci_lock_rescan_remove();
        pci_stop_and_remove_bus_device(dev);
        pci_unlock_rescan_remove();

        return NULL;
}

static void *eeh_pe_detach_dev(void *data, void *userdata)
{
        struct eeh_pe *pe = (struct eeh_pe *)data;
        struct eeh_dev *edev, *tmp;

        eeh_pe_for_each_dev(pe, edev, tmp) {
                if (!(edev->mode & EEH_DEV_DISCONNECTED))
                        continue;

                edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
                eeh_rmv_from_parent_pe(edev);
        }

        return NULL;
}

/*
 * Explicitly clear the PE's frozen state. On PowerNV the PE stays
 * frozen until its BAR restore has completed; it's harmless to clear
 * the state on pSeries too. To be consistent with the PE reset
 * (which is attempted 3 times), we also try to clear the frozen
 * state up to 3 times.
 */
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
        struct eeh_pe *pe = (struct eeh_pe *)data;
        bool *clear_sw_state = flag;
        int i, rc = 1;

        for (i = 0; rc && i < 3; i++)
                rc = eeh_unfreeze_pe(pe, clear_sw_state);

        /* Stop immediately on any errors */
        if (rc) {
                pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
                        __func__, rc, pe->phb->global_number, pe->addr);
                return (void *)pe;
        }

        return NULL;
}

static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
                                     bool clear_sw_state)
{
        void *rc;

        rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
        if (!rc)
                eeh_pe_state_clear(pe, EEH_PE_ISOLATED);

        return rc ? -EIO : 0;
}

int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
        int result = PCI_ERS_RESULT_NONE, ret;

        /* Bail if the PE is being recovered */
        if (pe->state & EEH_PE_RECOVERING)
                return 0;

        /* Put the PE into recovery mode */
        eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

        /* Save states */
        eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

        /* Report error */
        eeh_pe_dev_traverse(pe, eeh_report_error, &result);

        /* Issue reset */
        ret = eeh_reset_pe(pe);
        if (ret) {
                eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
                return ret;
        }

        /* Unfreeze the PE */
        ret = eeh_clear_pe_frozen_state(pe, true);
        if (ret) {
                eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
                return ret;
        }

        /* Notify completion of reset */
        eeh_pe_dev_traverse(pe, eeh_report_reset, &result);

        /* Restore device state */
        eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

        /* Resume */
        eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

        /* Clear recovery mode */
        eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

        return 0;
}
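
#if 0
/*
 * Illustrative sketch, not built: a hypothetical caller driving a
 * full reset-and-recover cycle for the PE behind a given device,
 * e.g. ahead of some platform-specific reconfiguration.
 * example_recover_pe() is hypothetical.
 */
static int example_recover_pe(struct pci_dev *pdev)
{
        struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);

        if (!edev || !edev->pe)
                return -ENODEV;

        return eeh_pe_reset_and_recover(edev->pe);
}
#endif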

/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because those affected
 * PCI devices will be removed and then added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
{
        struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
        struct timeval tstamp;
        int cnt, rc, removed = 0;

        /* pcibios will clear the counter; save the value */
        cnt = pe->freeze_count;
        tstamp = pe->tstamp;

        /*
         * We don't remove the corresponding PE instances because
         * we need the information afterwards. The attached EEH
         * devices are expected to be attached soon when calling
         * into pcibios_add_pci_devices().
         */
        eeh_pe_state_mark(pe, EEH_PE_KEEP);
        if (bus) {
                pci_lock_rescan_remove();
                pcibios_remove_pci_devices(bus);
                pci_unlock_rescan_remove();
        } else if (frozen_bus) {
                eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed);
        }

        /*
         * Reset the pci controller. (Asserts RST#; resets config space).
         * Reconfigure bridges and devices. Don't try to bring the system
         * up if the reset failed for some reason.
         *
         * During the reset, it's very dangerous to have uncontrolled PCI
         * config accesses. So we prefer to block them. However, controlled
         * PCI config accesses initiated from EEH itself are allowed.
         */
        rc = eeh_reset_pe(pe);
        if (rc)
                return rc;

        pci_lock_rescan_remove();

        /* Restore PE */
        eeh_ops->configure_bridge(pe);
        eeh_pe_restore_bars(pe);

        /* Clear frozen state */
        rc = eeh_clear_pe_frozen_state(pe, false);
        if (rc) {
                pci_unlock_rescan_remove();
                return rc;
        }

        /* Give the system 5 seconds to finish running the user-space
         * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
         * this is a hack, but if we don't do this, and try to bring
         * the device up before the scripts have taken it down,
         * potentially weird things happen.
         */
        if (bus) {
                pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
                ssleep(5);

                /*
                 * The EEH device is still connected with its parent
                 * PE. We should disconnect it so the binding can be
                 * rebuilt when adding PCI devices.
                 */
                eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
                pcibios_add_pci_devices(bus);
        } else if (frozen_bus && removed) {
                pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
                ssleep(5);

                eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
                pcibios_add_pci_devices(frozen_bus);
        }
        eeh_pe_state_clear(pe, EEH_PE_KEEP);

        pe->tstamp = tstamp;
        pe->freeze_count = cnt;

        pci_unlock_rescan_remove();
        return 0;
}

/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300

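/*
 * Summary of the recovery flow implemented below. The cumulative
 * result of the drivers' error_detected() callbacks decides the path:
 *
 *   PCI_ERS_RESULT_NONE        - no driver handled it; reset the PE
 *                                with full hotplug (remove/re-add).
 *   PCI_ERS_RESULT_CAN_RECOVER - thaw MMIO and call mmio_enabled();
 *                                if still recoverable, thaw DMA and
 *                                declare the PE recovered.
 *   PCI_ERS_RESULT_NEED_RESET  - reset the PE without hotplug and
 *                                call slot_reset().
 *   PCI_ERS_RESULT_DISCONNECT  - permanent failure; remove devices.
 *
 * On success, resume() is reported to all drivers at the end.
 */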
static void eeh_handle_normal_event(struct eeh_pe *pe)
{
        struct pci_bus *frozen_bus;
        int rc = 0;
        enum pci_ers_result result = PCI_ERS_RESULT_NONE;

        frozen_bus = eeh_pe_bus_get(pe);
        if (!frozen_bus) {
                pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
                       __func__, pe->phb->global_number, pe->addr);
                return;
        }

        eeh_pe_update_time_stamp(pe);
        pe->freeze_count++;
        if (pe->freeze_count > eeh_max_freezes)
                goto excess_failures;
        pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
                pe->freeze_count);

        /* Walk the various device drivers attached to this slot through
         * a reset sequence, giving each an opportunity to do what it needs
         * to accomplish the reset. Each child gets a report of the
         * status ... if any child can't handle the reset, then the entire
         * slot is dlpar removed and added.
         */
        pr_info("EEH: Notify device drivers to shutdown\n");
        eeh_pe_dev_traverse(pe, eeh_report_error, &result);

        /* Get the current PCI slot state. This can take a long time,
         * sometimes over 3 seconds for certain systems.
         */
        rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
        if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
                pr_warn("EEH: Permanent failure\n");
                goto hard_fail;
        }

        /* Since rtas may enable MMIO when posting the error log,
         * don't post the error log until after all dev drivers
         * have been informed.
         */
        pr_info("EEH: Collect temporary log\n");
        eeh_slot_error_detail(pe, EEH_LOG_TEMP);

        /* If all device drivers were EEH-unaware, then shut
         * down all of the device drivers, and hope they
         * go down willingly, without panicking the system.
         */
        if (result == PCI_ERS_RESULT_NONE) {
                pr_info("EEH: Reset with hotplug activity\n");
                rc = eeh_reset_device(pe, frozen_bus);
                if (rc) {
                        pr_warn("%s: Unable to reset, err=%d\n",
                                __func__, rc);
                        goto hard_fail;
                }
        }

        /* If all devices reported they can proceed, then re-enable MMIO */
        if (result == PCI_ERS_RESULT_CAN_RECOVER) {
                pr_info("EEH: Enable I/O for affected devices\n");
                rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

                if (rc < 0)
                        goto hard_fail;
                if (rc) {
                        result = PCI_ERS_RESULT_NEED_RESET;
                } else {
                        pr_info("EEH: Notify device drivers to resume I/O\n");
                        eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
                }
        }

        /* If all devices reported they can proceed, then re-enable DMA */
        if (result == PCI_ERS_RESULT_CAN_RECOVER) {
                pr_info("EEH: Enable DMA for affected devices\n");
                rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

                if (rc < 0)
                        goto hard_fail;
                if (rc) {
                        result = PCI_ERS_RESULT_NEED_RESET;
                } else {
                        /*
                         * We didn't do PE reset for the case. The PE
                         * is still in frozen state. Clear it before
                         * resuming the PE.
                         */
                        eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
                        result = PCI_ERS_RESULT_RECOVERED;
                }
        }

        /* If any device has a hard failure, then shut off everything. */
        if (result == PCI_ERS_RESULT_DISCONNECT) {
                pr_warn("EEH: Device driver gave up\n");
                goto hard_fail;
        }

        /* If any device called out for a reset, then reset the slot */
        if (result == PCI_ERS_RESULT_NEED_RESET) {
                pr_info("EEH: Reset without hotplug activity\n");
                rc = eeh_reset_device(pe, NULL);
                if (rc) {
                        pr_warn("%s: Cannot reset, err=%d\n",
                                __func__, rc);
                        goto hard_fail;
                }

                pr_info("EEH: Notify device drivers of the completion of reset\n");
                result = PCI_ERS_RESULT_NONE;
                eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
        }

        /* All devices should claim they have recovered by now. */
        if ((result != PCI_ERS_RESULT_RECOVERED) &&
            (result != PCI_ERS_RESULT_NONE)) {
                pr_warn("EEH: Not recovered\n");
                goto hard_fail;
        }

        /* Tell all device drivers that they can resume operations */
        pr_info("EEH: Notify device drivers to resume\n");
        eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

        return;

excess_failures:
        /*
         * About 90% of all real-life EEH failures in the field
         * are due to poorly seated PCI cards. Only 10% or so are
         * due to actual, failed cards.
         */
        pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
               "last hour and has been permanently disabled.\n"
               "Please try reseating or replacing it.\n",
               pe->phb->global_number, pe->addr,
               pe->freeze_count);
        goto perm_error;

hard_fail:
        pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
               "Please try reseating or replacing it\n",
               pe->phb->global_number, pe->addr);

perm_error:
        eeh_slot_error_detail(pe, EEH_LOG_PERM);

        /* Notify all devices that they're about to go down. */
        eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);

        /* Mark the PE to be removed permanently */
        eeh_pe_state_mark(pe, EEH_PE_REMOVED);

        /*
         * Shut down the device drivers for good. We mark
         * all removed devices correctly to avoid any further
         * accesses to their PCI config space.
         */
        if (frozen_bus) {
                eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

                pci_lock_rescan_remove();
                pcibios_remove_pci_devices(frozen_bus);
                pci_unlock_rescan_remove();
        }
}

static void eeh_handle_special_event(void)
{
        struct eeh_pe *pe, *phb_pe;
        struct pci_bus *bus;
        struct pci_controller *hose;
        unsigned long flags;
        int rc;


        do {
                rc = eeh_ops->next_error(&pe);

                switch (rc) {
                case EEH_NEXT_ERR_DEAD_IOC:
                        /* Mark all PHBs in dead state */
                        eeh_serialize_lock(&flags);

                        /* Purge all events */
                        eeh_remove_event(NULL, true);

                        list_for_each_entry(hose, &hose_list, list_node) {
                                phb_pe = eeh_phb_pe_get(hose);
                                if (!phb_pe) continue;

                                eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
                        }

                        eeh_serialize_unlock(flags);

                        break;
                case EEH_NEXT_ERR_FROZEN_PE:
                case EEH_NEXT_ERR_FENCED_PHB:
                case EEH_NEXT_ERR_DEAD_PHB:
                        /* Mark the PE in fenced state */
                        eeh_serialize_lock(&flags);

                        /* Purge all events of the PHB */
                        eeh_remove_event(pe, true);

                        if (rc == EEH_NEXT_ERR_DEAD_PHB)
                                eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
                        else
                                eeh_pe_state_mark(pe,
                                        EEH_PE_ISOLATED | EEH_PE_RECOVERING);

                        eeh_serialize_unlock(flags);

                        break;
                case EEH_NEXT_ERR_NONE:
                        return;
                default:
                        pr_warn("%s: Invalid value %d from next_error()\n",
                                __func__, rc);
                        return;
                }

                /*
                 * A fenced PHB or frozen PE is handled as a normal
                 * event. For a dead PHB or IOC we have to remove
                 * the affected PHBs instead.
                 */
                if (rc == EEH_NEXT_ERR_FROZEN_PE ||
                    rc == EEH_NEXT_ERR_FENCED_PHB) {
                        eeh_handle_normal_event(pe);
                        eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
                } else {
                        pci_lock_rescan_remove();
                        list_for_each_entry(hose, &hose_list, list_node) {
                                phb_pe = eeh_phb_pe_get(hose);
                                if (!phb_pe ||
                                    !(phb_pe->state & EEH_PE_ISOLATED) ||
                                    (phb_pe->state & EEH_PE_RECOVERING))
                                        continue;

                                /* Notify all devices to be down */
                                bus = eeh_pe_bus_get(phb_pe);
                                eeh_pe_dev_traverse(pe,
                                        eeh_report_failure, NULL);
                                pcibios_remove_pci_devices(bus);
                        }
                        pci_unlock_rescan_remove();
                }

                /*
                 * If we have detected a dead IOC, we needn't proceed
                 * any further since all PHBs would have been removed.
                 */
                if (rc == EEH_NEXT_ERR_DEAD_IOC)
                        break;
        } while (rc != EEH_NEXT_ERR_NONE);
}

/**
 * eeh_handle_event - Reset a PCI device after hard lockup.
 * @pe: EEH PE
 *
 * When a PHB detects address or data parity errors on a particular
 * PCI slot, the associated PE is frozen. Besides that, DMAs to wild
 * addresses (which usually happen due to bugs in device drivers or
 * in PCI adapter firmware) can cause EEH errors, and #SERR, #PERR
 * and other miscellaneous PCI-related errors can trigger them too.
 *
 * The recovery process consists of unplugging the device driver
 * (which generates hotplug events to userspace), then issuing a PCI
 * #RST to the device, then reconfiguring the PCI config space for all
 * bridges and devices under this slot, and finally restarting the
 * device drivers (which causes a second set of hotplug events to go
 * out to userspace).
 */
void eeh_handle_event(struct eeh_pe *pe)
{
        if (pe)
                eeh_handle_normal_event(pe);
        else
                eeh_handle_special_event();
}
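
#if 0
/*
 * Illustrative sketch, not built: how the EEH event loop is expected
 * to dispatch into the handler above -- a specific PE for a normal
 * event, or a NULL PE to let the platform report special (PHB- or
 * IOC-level) errors. example_dispatch() is hypothetical.
 */
static void example_dispatch(struct eeh_event *event)
{
        /* event->pe may legitimately be NULL here */
        eeh_handle_event(event->pe);
}
#endif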