/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
31 #include <asm/eeh_event.h>
32 #include <asm/ppc-pci.h>
33 #include <asm/pci-bridge.h>
38 * eeh_pcid_name - Retrieve name of PCI device driver
41 * This routine is used to retrieve the name of PCI device driver
44 static inline const char *eeh_pcid_name(struct pci_dev
*pdev
)
46 if (pdev
&& pdev
->dev
.driver
)
47 return pdev
->dev
.driver
->name
;
52 * eeh_pcid_get - Get the PCI device driver
55 * The function is used to retrieve the PCI device driver for
56 * the indicated PCI device. Besides, we will increase the reference
57 * of the PCI device driver to prevent that being unloaded on
58 * the fly. Otherwise, kernel crash would be seen.
60 static inline struct pci_driver
*eeh_pcid_get(struct pci_dev
*pdev
)
62 if (!pdev
|| !pdev
->driver
)
65 if (!try_module_get(pdev
->driver
->driver
.owner
))
72 * eeh_pcid_put - Dereference on the PCI device driver
75 * The function is called to do dereference on the PCI device
76 * driver of the indicated PCI device.
78 static inline void eeh_pcid_put(struct pci_dev
*pdev
)
80 if (!pdev
|| !pdev
->driver
)
83 module_put(pdev
->driver
->driver
.owner
);
87 * eeh_disable_irq - Disable interrupt for the recovering device
90 * This routine must be called when reporting temporary or permanent
91 * error to the particular PCI device to disable interrupt of that
92 * device. If the device has enabled MSI or MSI-X interrupt, we needn't
93 * do real work because EEH should freeze DMA transfers for those PCI
94 * devices encountering EEH errors, which includes MSI or MSI-X.
96 static void eeh_disable_irq(struct pci_dev
*dev
)
98 struct eeh_dev
*edev
= pci_dev_to_eeh_dev(dev
);
100 /* Don't disable MSI and MSI-X interrupts. They are
101 * effectively disabled by the DMA Stopped state
102 * when an EEH error occurs.
104 if (dev
->msi_enabled
|| dev
->msix_enabled
)
107 if (!irq_has_action(dev
->irq
))
110 edev
->mode
|= EEH_DEV_IRQ_DISABLED
;
111 disable_irq_nosync(dev
->irq
);
115 * eeh_enable_irq - Enable interrupt for the recovering device
118 * This routine must be called to enable interrupt while failed
119 * device could be resumed.
121 static void eeh_enable_irq(struct pci_dev
*dev
)
123 struct eeh_dev
*edev
= pci_dev_to_eeh_dev(dev
);
125 if ((edev
->mode
) & EEH_DEV_IRQ_DISABLED
) {
126 edev
->mode
&= ~EEH_DEV_IRQ_DISABLED
;
130 * This is just ass backwards. This maze has
131 * unbalanced irq_enable/disable calls. So instead of
132 * finding the root cause it works around the warning
133 * in the irq_enable code by conditionally calling
136 * That's just wrong.The warning in the core code is
137 * there to tell people to fix their assymetries in
138 * their own code, not by abusing the core information
141 * I so wish that the assymetry would be the other way
142 * round and a few more irq_disable calls render that
143 * shit unusable forever.
147 if (irqd_irq_disabled(irq_get_irq_data(dev
->irq
)))
148 enable_irq(dev
->irq
);
152 static bool eeh_dev_removed(struct eeh_dev
*edev
)
154 /* EEH device removed ? */
155 if (!edev
|| (edev
->mode
& EEH_DEV_REMOVED
))
161 static void *eeh_dev_save_state(void *data
, void *userdata
)
163 struct eeh_dev
*edev
= data
;
164 struct pci_dev
*pdev
;
169 pdev
= eeh_dev_to_pci_dev(edev
);
173 pci_save_state(pdev
);
178 * eeh_report_error - Report pci error to each device driver
180 * @userdata: return value
182 * Report an EEH error to each device driver, collect up and
183 * merge the device driver responses. Cumulative response
184 * passed back in "userdata".
186 static void *eeh_report_error(void *data
, void *userdata
)
188 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
189 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
190 enum pci_ers_result rc
, *res
= userdata
;
191 struct pci_driver
*driver
;
193 if (!dev
|| eeh_dev_removed(edev
))
195 dev
->error_state
= pci_channel_io_frozen
;
197 driver
= eeh_pcid_get(dev
);
198 if (!driver
) return NULL
;
200 eeh_disable_irq(dev
);
202 if (!driver
->err_handler
||
203 !driver
->err_handler
->error_detected
) {
208 rc
= driver
->err_handler
->error_detected(dev
, pci_channel_io_frozen
);
210 /* A driver that needs a reset trumps all others */
211 if (rc
== PCI_ERS_RESULT_NEED_RESET
) *res
= rc
;
212 if (*res
== PCI_ERS_RESULT_NONE
) *res
= rc
;
219 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
221 * @userdata: return value
223 * Tells each device driver that IO ports, MMIO and config space I/O
224 * are now enabled. Collects up and merges the device driver responses.
225 * Cumulative response passed back in "userdata".
227 static void *eeh_report_mmio_enabled(void *data
, void *userdata
)
229 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
230 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
231 enum pci_ers_result rc
, *res
= userdata
;
232 struct pci_driver
*driver
;
234 if (!dev
|| eeh_dev_removed(edev
))
237 driver
= eeh_pcid_get(dev
);
238 if (!driver
) return NULL
;
240 if (!driver
->err_handler
||
241 !driver
->err_handler
->mmio_enabled
||
242 (edev
->mode
& EEH_DEV_NO_HANDLER
)) {
247 rc
= driver
->err_handler
->mmio_enabled(dev
);
249 /* A driver that needs a reset trumps all others */
250 if (rc
== PCI_ERS_RESULT_NEED_RESET
) *res
= rc
;
251 if (*res
== PCI_ERS_RESULT_NONE
) *res
= rc
;
258 * eeh_report_reset - Tell device that slot has been reset
260 * @userdata: return value
262 * This routine must be called while EEH tries to reset particular
263 * PCI device so that the associated PCI device driver could take
264 * some actions, usually to save data the driver needs so that the
265 * driver can work again while the device is recovered.
267 static void *eeh_report_reset(void *data
, void *userdata
)
269 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
270 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
271 enum pci_ers_result rc
, *res
= userdata
;
272 struct pci_driver
*driver
;
274 if (!dev
|| eeh_dev_removed(edev
))
276 dev
->error_state
= pci_channel_io_normal
;
278 driver
= eeh_pcid_get(dev
);
279 if (!driver
) return NULL
;
283 if (!driver
->err_handler
||
284 !driver
->err_handler
->slot_reset
||
285 (edev
->mode
& EEH_DEV_NO_HANDLER
)) {
290 rc
= driver
->err_handler
->slot_reset(dev
);
291 if ((*res
== PCI_ERS_RESULT_NONE
) ||
292 (*res
== PCI_ERS_RESULT_RECOVERED
)) *res
= rc
;
293 if (*res
== PCI_ERS_RESULT_DISCONNECT
&&
294 rc
== PCI_ERS_RESULT_NEED_RESET
) *res
= rc
;
300 static void *eeh_dev_restore_state(void *data
, void *userdata
)
302 struct eeh_dev
*edev
= data
;
303 struct pci_dev
*pdev
;
308 pdev
= eeh_dev_to_pci_dev(edev
);
312 pci_restore_state(pdev
);
317 * eeh_report_resume - Tell device to resume normal operations
319 * @userdata: return value
321 * This routine must be called to notify the device driver that it
322 * could resume so that the device driver can do some initialization
323 * to make the recovered device work again.
325 static void *eeh_report_resume(void *data
, void *userdata
)
327 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
328 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
329 struct pci_driver
*driver
;
331 if (!dev
|| eeh_dev_removed(edev
))
333 dev
->error_state
= pci_channel_io_normal
;
335 driver
= eeh_pcid_get(dev
);
336 if (!driver
) return NULL
;
340 if (!driver
->err_handler
||
341 !driver
->err_handler
->resume
||
342 (edev
->mode
& EEH_DEV_NO_HANDLER
)) {
343 edev
->mode
&= ~EEH_DEV_NO_HANDLER
;
348 driver
->err_handler
->resume(dev
);
355 * eeh_report_failure - Tell device driver that device is dead.
357 * @userdata: return value
359 * This informs the device driver that the device is permanently
360 * dead, and that no further recovery attempts will be made on it.
362 static void *eeh_report_failure(void *data
, void *userdata
)
364 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
365 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
366 struct pci_driver
*driver
;
368 if (!dev
|| eeh_dev_removed(edev
))
370 dev
->error_state
= pci_channel_io_perm_failure
;
372 driver
= eeh_pcid_get(dev
);
373 if (!driver
) return NULL
;
375 eeh_disable_irq(dev
);
377 if (!driver
->err_handler
||
378 !driver
->err_handler
->error_detected
) {
383 driver
->err_handler
->error_detected(dev
, pci_channel_io_perm_failure
);
389 static void *eeh_rmv_device(void *data
, void *userdata
)
391 struct pci_driver
*driver
;
392 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
393 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
394 int *removed
= (int *)userdata
;
397 * Actually, we should remove the PCI bridges as well.
398 * However, that's lots of complexity to do that,
399 * particularly some of devices under the bridge might
400 * support EEH. So we just care about PCI devices for
403 if (!dev
|| (dev
->hdr_type
& PCI_HEADER_TYPE_BRIDGE
))
407 * We rely on count-based pcibios_release_device() to
408 * detach permanently offlined PEs. Unfortunately, that's
409 * not reliable enough. We might have the permanently
410 * offlined PEs attached, but we needn't take care of
411 * them and their child devices.
413 if (eeh_dev_removed(edev
))
416 driver
= eeh_pcid_get(dev
);
419 if (driver
->err_handler
&&
420 driver
->err_handler
->error_detected
&&
421 driver
->err_handler
->slot_reset
&&
422 driver
->err_handler
->resume
)
426 /* Remove it from PCI subsystem */
427 pr_debug("EEH: Removing %s without EEH sensitive driver\n",
429 edev
->bus
= dev
->bus
;
430 edev
->mode
|= EEH_DEV_DISCONNECTED
;
433 pci_lock_rescan_remove();
434 pci_stop_and_remove_bus_device(dev
);
435 pci_unlock_rescan_remove();
440 static void *eeh_pe_detach_dev(void *data
, void *userdata
)
442 struct eeh_pe
*pe
= (struct eeh_pe
*)data
;
443 struct eeh_dev
*edev
, *tmp
;
445 eeh_pe_for_each_dev(pe
, edev
, tmp
) {
446 if (!(edev
->mode
& EEH_DEV_DISCONNECTED
))
449 edev
->mode
&= ~(EEH_DEV_DISCONNECTED
| EEH_DEV_IRQ_DISABLED
);
450 eeh_rmv_from_parent_pe(edev
);
457 * Explicitly clear PE's frozen state for PowerNV where
458 * we have frozen PE until BAR restore is completed. It's
459 * harmless to clear it for pSeries. To be consistent with
460 * PE reset (for 3 times), we try to clear the frozen state
461 * for 3 times as well.
463 static void *__eeh_clear_pe_frozen_state(void *data
, void *flag
)
465 struct eeh_pe
*pe
= (struct eeh_pe
*)data
;
466 bool *clear_sw_state
= flag
;
469 for (i
= 0; rc
&& i
< 3; i
++)
470 rc
= eeh_unfreeze_pe(pe
, clear_sw_state
);
472 /* Stop immediately on any errors */
474 pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
475 __func__
, rc
, pe
->phb
->global_number
, pe
->addr
);
482 static int eeh_clear_pe_frozen_state(struct eeh_pe
*pe
,
487 rc
= eeh_pe_traverse(pe
, __eeh_clear_pe_frozen_state
, &clear_sw_state
);
489 eeh_pe_state_clear(pe
, EEH_PE_ISOLATED
);
491 return rc
? -EIO
: 0;
494 int eeh_pe_reset_and_recover(struct eeh_pe
*pe
)
498 /* Bail if the PE is being recovered */
499 if (pe
->state
& EEH_PE_RECOVERING
)
502 /* Put the PE into recovery mode */
503 eeh_pe_state_mark(pe
, EEH_PE_RECOVERING
);
506 eeh_pe_dev_traverse(pe
, eeh_dev_save_state
, NULL
);
509 eeh_pe_dev_traverse(pe
, eeh_report_error
, &result
);
512 ret
= eeh_reset_pe(pe
);
514 eeh_pe_state_clear(pe
, EEH_PE_RECOVERING
);
518 /* Unfreeze the PE */
519 ret
= eeh_clear_pe_frozen_state(pe
, true);
521 eeh_pe_state_clear(pe
, EEH_PE_RECOVERING
);
525 /* Notify completion of reset */
526 eeh_pe_dev_traverse(pe
, eeh_report_reset
, &result
);
528 /* Restore device state */
529 eeh_pe_dev_traverse(pe
, eeh_dev_restore_state
, NULL
);
532 eeh_pe_dev_traverse(pe
, eeh_report_resume
, NULL
);
534 /* Clear recovery mode */
535 eeh_pe_state_clear(pe
, EEH_PE_RECOVERING
);
541 * eeh_reset_device - Perform actual reset of a pci slot
543 * @bus: PCI bus corresponding to the isolcated slot
545 * This routine must be called to do reset on the indicated PE.
546 * During the reset, udev might be invoked because those affected
547 * PCI devices will be removed and then added.
549 static int eeh_reset_device(struct eeh_pe
*pe
, struct pci_bus
*bus
)
551 struct pci_bus
*frozen_bus
= eeh_pe_bus_get(pe
);
552 struct timeval tstamp
;
553 int cnt
, rc
, removed
= 0;
555 /* pcibios will clear the counter; save the value */
556 cnt
= pe
->freeze_count
;
560 * We don't remove the corresponding PE instances because
561 * we need the information afterwords. The attached EEH
562 * devices are expected to be attached soon when calling
563 * into pcibios_add_pci_devices().
565 eeh_pe_state_mark(pe
, EEH_PE_KEEP
);
567 eeh_pe_state_clear(pe
, EEH_PE_PRI_BUS
);
568 pci_lock_rescan_remove();
569 pcibios_remove_pci_devices(bus
);
570 pci_unlock_rescan_remove();
571 } else if (frozen_bus
) {
572 eeh_pe_dev_traverse(pe
, eeh_rmv_device
, &removed
);
576 * Reset the pci controller. (Asserts RST#; resets config space).
577 * Reconfigure bridges and devices. Don't try to bring the system
578 * up if the reset failed for some reason.
580 * During the reset, it's very dangerous to have uncontrolled PCI
581 * config accesses. So we prefer to block them. However, controlled
582 * PCI config accesses initiated from EEH itself are allowed.
584 rc
= eeh_reset_pe(pe
);
588 pci_lock_rescan_remove();
591 eeh_ops
->configure_bridge(pe
);
592 eeh_pe_restore_bars(pe
);
594 /* Clear frozen state */
595 rc
= eeh_clear_pe_frozen_state(pe
, false);
599 /* Give the system 5 seconds to finish running the user-space
600 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
601 * this is a hack, but if we don't do this, and try to bring
602 * the device up before the scripts have taken it down,
603 * potentially weird things happen.
606 pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
610 * The EEH device is still connected with its parent
611 * PE. We should disconnect it so the binding can be
612 * rebuilt when adding PCI devices.
614 eeh_pe_traverse(pe
, eeh_pe_detach_dev
, NULL
);
615 pcibios_add_pci_devices(bus
);
616 } else if (frozen_bus
&& removed
) {
617 pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
620 eeh_pe_traverse(pe
, eeh_pe_detach_dev
, NULL
);
621 pcibios_add_pci_devices(frozen_bus
);
623 eeh_pe_state_clear(pe
, EEH_PE_KEEP
);
626 pe
->freeze_count
= cnt
;
628 pci_unlock_rescan_remove();
/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300
637 static void eeh_handle_normal_event(struct eeh_pe
*pe
)
639 struct pci_bus
*frozen_bus
;
641 enum pci_ers_result result
= PCI_ERS_RESULT_NONE
;
643 frozen_bus
= eeh_pe_bus_get(pe
);
645 pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
646 __func__
, pe
->phb
->global_number
, pe
->addr
);
650 eeh_pe_update_time_stamp(pe
);
652 if (pe
->freeze_count
> eeh_max_freezes
)
653 goto excess_failures
;
654 pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
657 /* Walk the various device drivers attached to this slot through
658 * a reset sequence, giving each an opportunity to do what it needs
659 * to accomplish the reset. Each child gets a report of the
660 * status ... if any child can't handle the reset, then the entire
661 * slot is dlpar removed and added.
663 * When the PHB is fenced, we have to issue a reset to recover from
664 * the error. Override the result if necessary to have partially
665 * hotplug for this case.
667 pr_info("EEH: Notify device drivers to shutdown\n");
668 eeh_pe_dev_traverse(pe
, eeh_report_error
, &result
);
669 if ((pe
->type
& EEH_PE_PHB
) &&
670 result
!= PCI_ERS_RESULT_NONE
&&
671 result
!= PCI_ERS_RESULT_NEED_RESET
)
672 result
= PCI_ERS_RESULT_NEED_RESET
;
674 /* Get the current PCI slot state. This can take a long time,
675 * sometimes over 300 seconds for certain systems.
677 rc
= eeh_ops
->wait_state(pe
, MAX_WAIT_FOR_RECOVERY
*1000);
678 if (rc
< 0 || rc
== EEH_STATE_NOT_SUPPORT
) {
679 pr_warn("EEH: Permanent failure\n");
683 /* Since rtas may enable MMIO when posting the error log,
684 * don't post the error log until after all dev drivers
685 * have been informed.
687 pr_info("EEH: Collect temporary log\n");
688 eeh_slot_error_detail(pe
, EEH_LOG_TEMP
);
690 /* If all device drivers were EEH-unaware, then shut
691 * down all of the device drivers, and hope they
692 * go down willingly, without panicing the system.
694 if (result
== PCI_ERS_RESULT_NONE
) {
695 pr_info("EEH: Reset with hotplug activity\n");
696 rc
= eeh_reset_device(pe
, frozen_bus
);
698 pr_warn("%s: Unable to reset, err=%d\n",
704 /* If all devices reported they can proceed, then re-enable MMIO */
705 if (result
== PCI_ERS_RESULT_CAN_RECOVER
) {
706 pr_info("EEH: Enable I/O for affected devices\n");
707 rc
= eeh_pci_enable(pe
, EEH_OPT_THAW_MMIO
);
712 result
= PCI_ERS_RESULT_NEED_RESET
;
714 pr_info("EEH: Notify device drivers to resume I/O\n");
715 eeh_pe_dev_traverse(pe
, eeh_report_mmio_enabled
, &result
);
719 /* If all devices reported they can proceed, then re-enable DMA */
720 if (result
== PCI_ERS_RESULT_CAN_RECOVER
) {
721 pr_info("EEH: Enabled DMA for affected devices\n");
722 rc
= eeh_pci_enable(pe
, EEH_OPT_THAW_DMA
);
727 result
= PCI_ERS_RESULT_NEED_RESET
;
730 * We didn't do PE reset for the case. The PE
731 * is still in frozen state. Clear it before
734 eeh_pe_state_clear(pe
, EEH_PE_ISOLATED
);
735 result
= PCI_ERS_RESULT_RECOVERED
;
739 /* If any device has a hard failure, then shut off everything. */
740 if (result
== PCI_ERS_RESULT_DISCONNECT
) {
741 pr_warn("EEH: Device driver gave up\n");
745 /* If any device called out for a reset, then reset the slot */
746 if (result
== PCI_ERS_RESULT_NEED_RESET
) {
747 pr_info("EEH: Reset without hotplug activity\n");
748 rc
= eeh_reset_device(pe
, NULL
);
750 pr_warn("%s: Cannot reset, err=%d\n",
755 pr_info("EEH: Notify device drivers "
756 "the completion of reset\n");
757 result
= PCI_ERS_RESULT_NONE
;
758 eeh_pe_dev_traverse(pe
, eeh_report_reset
, &result
);
761 /* All devices should claim they have recovered by now. */
762 if ((result
!= PCI_ERS_RESULT_RECOVERED
) &&
763 (result
!= PCI_ERS_RESULT_NONE
)) {
764 pr_warn("EEH: Not recovered\n");
768 /* Tell all device drivers that they can resume operations */
769 pr_info("EEH: Notify device driver to resume\n");
770 eeh_pe_dev_traverse(pe
, eeh_report_resume
, NULL
);
776 * About 90% of all real-life EEH failures in the field
777 * are due to poorly seated PCI cards. Only 10% or so are
778 * due to actual, failed cards.
780 pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
781 "last hour and has been permanently disabled.\n"
782 "Please try reseating or replacing it.\n",
783 pe
->phb
->global_number
, pe
->addr
,
788 pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
789 "Please try reseating or replacing it\n",
790 pe
->phb
->global_number
, pe
->addr
);
793 eeh_slot_error_detail(pe
, EEH_LOG_PERM
);
795 /* Notify all devices that they're about to go down. */
796 eeh_pe_dev_traverse(pe
, eeh_report_failure
, NULL
);
798 /* Mark the PE to be removed permanently */
799 eeh_pe_state_mark(pe
, EEH_PE_REMOVED
);
802 * Shut down the device drivers for good. We mark
803 * all removed devices correctly to avoid access
804 * the their PCI config any more.
807 eeh_pe_state_clear(pe
, EEH_PE_PRI_BUS
);
808 eeh_pe_dev_mode_mark(pe
, EEH_DEV_REMOVED
);
810 pci_lock_rescan_remove();
811 pcibios_remove_pci_devices(frozen_bus
);
812 pci_unlock_rescan_remove();
816 static void eeh_handle_special_event(void)
818 struct eeh_pe
*pe
, *phb_pe
;
820 struct pci_controller
*hose
;
826 rc
= eeh_ops
->next_error(&pe
);
829 case EEH_NEXT_ERR_DEAD_IOC
:
830 /* Mark all PHBs in dead state */
831 eeh_serialize_lock(&flags
);
833 /* Purge all events */
834 eeh_remove_event(NULL
, true);
836 list_for_each_entry(hose
, &hose_list
, list_node
) {
837 phb_pe
= eeh_phb_pe_get(hose
);
838 if (!phb_pe
) continue;
840 eeh_pe_state_mark(phb_pe
, EEH_PE_ISOLATED
);
843 eeh_serialize_unlock(flags
);
846 case EEH_NEXT_ERR_FROZEN_PE
:
847 case EEH_NEXT_ERR_FENCED_PHB
:
848 case EEH_NEXT_ERR_DEAD_PHB
:
849 /* Mark the PE in fenced state */
850 eeh_serialize_lock(&flags
);
852 /* Purge all events of the PHB */
853 eeh_remove_event(pe
, true);
855 if (rc
== EEH_NEXT_ERR_DEAD_PHB
)
856 eeh_pe_state_mark(pe
, EEH_PE_ISOLATED
);
858 eeh_pe_state_mark(pe
,
859 EEH_PE_ISOLATED
| EEH_PE_RECOVERING
);
861 eeh_serialize_unlock(flags
);
864 case EEH_NEXT_ERR_NONE
:
867 pr_warn("%s: Invalid value %d from next_error()\n",
873 * For fenced PHB and frozen PE, it's handled as normal
874 * event. We have to remove the affected PHBs for dead
877 if (rc
== EEH_NEXT_ERR_FROZEN_PE
||
878 rc
== EEH_NEXT_ERR_FENCED_PHB
) {
879 eeh_handle_normal_event(pe
);
880 eeh_pe_state_clear(pe
, EEH_PE_RECOVERING
);
882 pci_lock_rescan_remove();
883 list_for_each_entry(hose
, &hose_list
, list_node
) {
884 phb_pe
= eeh_phb_pe_get(hose
);
886 !(phb_pe
->state
& EEH_PE_ISOLATED
) ||
887 (phb_pe
->state
& EEH_PE_RECOVERING
))
890 /* Notify all devices to be down */
891 eeh_pe_state_clear(pe
, EEH_PE_PRI_BUS
);
892 bus
= eeh_pe_bus_get(phb_pe
);
893 eeh_pe_dev_traverse(pe
,
894 eeh_report_failure
, NULL
);
895 pcibios_remove_pci_devices(bus
);
897 pci_unlock_rescan_remove();
901 * If we have detected dead IOC, we needn't proceed
902 * any more since all PHBs would have been removed
904 if (rc
== EEH_NEXT_ERR_DEAD_IOC
)
906 } while (rc
!= EEH_NEXT_ERR_NONE
);
/**
 * eeh_handle_event - Reset a PCI device after hard lockup.
 * @pe: EEH PE, or NULL for a "special" event with no specific PE
 *
 * While PHB detects address or data parity errors on particular PCI
 * slot, the associated PE will be frozen. Besides, DMA's occurring
 * to wild addresses (which usually happen due to bugs in device
 * drivers or in PCI adapter firmware) can cause EEH error. #SERR,
 * #PERR or other misc PCI-related errors also can trigger EEH errors.
 *
 * Recovery process consists of unplugging the device driver (which
 * generated hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and then finally restarting the device
 * drivers (which cause a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_event(struct eeh_pe *pe)
{
	if (pe)
		eeh_handle_normal_event(pe);
	else
		eeh_handle_special_event();
}