/*
 * Purpose:	Generic MCA handling layer
 *
 * Updated for latest kernel
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Copyright (C) 2002 Dell Inc.
 * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
 *
 * Copyright (C) 2002 Intel
 * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
 *
 * Copyright (C) 2001 Intel
 * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
 *
 * Copyright (C) 2000 Intel
 * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
 *
 * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
 *
 * 03/04/15 D. Mosberger	Added INIT backtrace support.
 * 02/03/25 M. Domsch		GUID cleanups
 *
 * 02/01/04 J. Hall		Aligned MCA stack to 16 bytes, added platform vs. CPU
 *				error flag, set SAL default return values, changed
 *				error record structure to linked list, added init call
 *				to sal_get_state_info_size().
 *
 * 01/01/03 F. Lewis		Added setup of CMCI and CPEI IRQs, logging of corrected
 *				platform errors, completed code for logging of
 *				corrected & uncorrected machine check errors, and
 *				updated for conformance with Nov. 2000 revision of the
 *				SAL 3.0 spec.
 *
 * 00/03/29 C. Fleckenstein	Fixed PAL/SAL update issues, began MCA bug fixes,
 *				logging issues, added min save state dump,
 *				added INIT handler.
 *
 * 2003-12-08 Keith Owens <kaos@sgi.com>
 *		smp_call_function() must not be called from interrupt context
 *		(it can deadlock on tasklist_lock).  Use keventd to call
 *		smp_call_function().
 *
 * 2004-02-01 Keith Owens <kaos@sgi.com>
 *		Avoid deadlock when using printk() for MCA and INIT records.
 *		Delete all record printing code, moved to salinfo_decode in user
 *		space.  Mark variables and functions static where possible.
 *		Delete dead variables and functions.  Reorder to remove the need
 *		for forward declarations and to consolidate related code.
 *
 * 2005-08-12 Keith Owens <kaos@sgi.com>
 *		Convert MCA/INIT handlers to use per event stacks and SAL/OS state.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kallsyms.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#if defined(IA64_MCA_DEBUG_INFO)
# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
#else
# define IA64_MCA_DEBUG(fmt...)
#endif
/* Used by mca_asm.S */
u32				ia64_mca_serialize;
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */

unsigned long __per_cpu_mca[NR_CPUS];

extern void			ia64_os_init_dispatch_monarch (void);
extern void			ia64_os_init_dispatch_slave (void);

static int monarch_cpu = -1;

static ia64_mc_info_t		ia64_mc_info;
#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
#define CPE_HISTORY_LENGTH    5
#define CMC_HISTORY_LENGTH    5

static struct timer_list cpe_poll_timer;
static struct timer_list cmc_poll_timer;
/*
 * This variable tells whether we are currently in polling mode.
 * Start with this in the wrong state so we won't play w/ timers
 * before the system is ready.
 */
static int cmc_polling_enabled = 1;
/*
 * Clearing this variable prevents CPE polling from getting activated
 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
 * but encounters problems retrieving CPE logs.  This should only be
 * necessary for debugging.
 */
static int cpe_poll_enabled = 1;
extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
/*
 * IA64_MCA log support
 */
#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
#define IA64_MAX_LOG_TYPES	4	/* MCA, INIT, CMC, CPE */

typedef struct ia64_state_log_s
{
	spinlock_t	isl_lock;
	int		isl_index;
	unsigned long	isl_count;
	ia64_err_rec_t	*isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;

static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
#define IA64_LOG_ALLOCATE(it, size) \
	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size); \
	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
#define IA64_LOG_INDEX_INC(it) \
	{ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
	ia64_state_log[it].isl_count++;}
#define IA64_LOG_INDEX_DEC(it) \
	ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
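/*
 * Illustrative sketch, not part of the original file: how the double
 * buffering above flips.  isl_index always names the NEXT buffer (the one
 * the next handler will fill in); 1 - isl_index is the CURR buffer holding
 * the most recently completed record, so a nested MCA can be logged without
 * overwriting a record that is still being consumed.  The function name is
 * hypothetical and the block is compiled out.
 */
#if 0
static void example_log_buffer_flip(void)
{
	int it = SAL_INFO_TYPE_MCA;
	void *fill, *last;

	fill = IA64_LOG_NEXT_BUFFER(it);	/* write the new record here */
	/* ... copy the SAL record into fill ... */
	IA64_LOG_INDEX_INC(it);			/* fill becomes CURR, count++ */
	last = IA64_LOG_CURR_BUFFER(it);	/* == fill, ready for readers */
}
#endif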
/*
 * ia64_log_init
 *	Reset the OS ia64 log buffer
 * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs  :   None
 */
static void
ia64_log_init(int sal_info_type)
{
	u64	max_size = 0;

	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
	IA64_LOG_LOCK_INIT(sal_info_type);

	// SAL will tell us the maximum size of any error record of this type
	max_size = ia64_sal_get_state_info_size(sal_info_type);
	if (!max_size)
		/* alloc_bootmem() doesn't like zero-sized allocations! */
		return;

	// set up OS data structures to hold error info
	IA64_LOG_ALLOCATE(sal_info_type, max_size);
	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
}
/*
 * ia64_log_get
 *
 *	Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *              irq_safe    whether you can use printk at this point
 *  Outputs :   size        (total record length)
 *              *buffer     (ptr to error record)
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
	sal_log_record_header_t     *log_buffer;
	u64                         total_len = 0;
	int                         s;

	IA64_LOG_LOCK(sal_info_type);

	/* Get the process state information */
	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

	if (total_len) {
		IA64_LOG_INDEX_INC(sal_info_type);
		IA64_LOG_UNLOCK(sal_info_type);

		if (irq_safe) {
			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
				       "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
		}
		*buffer = (u8 *) log_buffer;
		return total_len;
	} else {
		IA64_LOG_UNLOCK(sal_info_type);
		return 0;
	}
}
/*
 * ia64_mca_log_sal_error_record
 *
 *	This function retrieves a specified error record type from SAL
 *	and wakes up any processes waiting for error records.
 *
 *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE)
 *              FIXME: remove MCA and irq_safe.
 */
static void
ia64_mca_log_sal_error_record(int sal_info_type)
{
	u8 *buffer;
	sal_log_record_header_t *rh;
	u64 size;
	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
#ifdef IA64_MCA_DEBUG_INFO
	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
#endif

	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
	if (!size)
		return;

	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);

	if (irq_safe)
		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
			smp_processor_id(),
			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

	/* Clear logs from corrected errors in case there's no user-level logger */
	rh = (sal_log_record_header_t *)buffer;
	if (rh->severity == sal_log_severity_corrected)
		ia64_sal_clear_state_info(sal_info_type);
}
/*
 * platform dependent error handling
 */
#ifndef PLATFORM_MCA_HANDLERS

#ifdef CONFIG_ACPI

int cpe_vector = -1;

static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cpe_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __FUNCTION__, cpe_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);

	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {

		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {

			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			return IRQ_HANDLED;
		} else {
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
	return IRQ_HANDLED;
}

#endif /* CONFIG_ACPI */
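/*
 * Illustrative sketch, not part of the original file: the interrupt-storm
 * detection used by the CPE (and CMC) handlers, reduced to its core.
 * Timestamps of the last CPE_HISTORY_LENGTH events sit in a ring buffer;
 * if all of them fall within the last second, the handler reports
 * saturation and the caller switches to polling.  Names are hypothetical;
 * the block is compiled out.
 */
#if 0
static int example_storm_check(unsigned long now)
{
	static unsigned long history[CPE_HISTORY_LENGTH];
	static int index;
	int i, count = 1;	/* the event being handled counts too */

	for (i = 0; i < CPE_HISTORY_LENGTH; i++)
		if (now - history[i] <= HZ)
			count++;
	history[index++] = now;
	if (index == CPE_HISTORY_LENGTH)
		index = 0;
	return count >= CPE_HISTORY_LENGTH;	/* nonzero => switch to polling */
}
#endif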
#ifdef CONFIG_ACPI
/*
 * ia64_mca_register_cpev
 *
 *	Register the corrected platform error vector with SAL.
 *
 *  Inputs
 *	cpev        Corrected Platform Error Vector number
 *
 *  Outputs
 *	None
 */
static void
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __FUNCTION__, cpev);
}
#endif /* CONFIG_ACPI */

#endif /* PLATFORM_MCA_HANDLERS */
/*
 * ia64_mca_cmc_vector_setup
 *
 *	Setup the corrected machine check vector register in the processor.
 *	(The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	None
 *
 * Outputs
 *	None
 */
void
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;	/* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x registered.\n",
		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}
/*
 * ia64_mca_cmc_vector_disable
 *
 *	Mask the corrected machine check vector register in the processor.
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	dummy (unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x disabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}
/*
 * ia64_mca_cmc_vector_enable
 *
 *	Unmask the corrected machine check vector register in the processor.
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	dummy (unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x enabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}
/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(void *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(void *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}
/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake-up a particular cpu
 *	and mark that cpu to be out of rendez.
 *
 *  Inputs  :   cpuid
 *  Outputs :   None
 */
static void
ia64_mca_wakeup(int cpu)
{
	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
}
/*
 * ia64_mca_wakeup_all
 *
 *	Wakeup all the cpus which have rendez'ed previously.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Clear the Rendez checkin flag for all cpus */
	for(cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_online(cpu))
			continue;
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}
}
/*
 * ia64_mca_rendez_int_handler
 *
 *	This is the handler used to put slave processors into a spinloop
 *	while the monarch processor does the MCA handling, and then to
 *	wake each slave up once the monarch is done.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	/* Mask all interrupts */
	local_irq_save(flags);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
	/* Register with the SAL monarch that the slave has
	 * reached SAL
	 */
	ia64_sal_mc_rendez();

	/* Wait for the monarch cpu to exit. */
	while (monarch_cpu != -1)
	       cpu_relax();	/* spin until monarch leaves */

	/* Enable all interrupts */
	local_irq_restore(flags);
	return IRQ_HANDLED;
}
/*
 * ia64_mca_wakeup_int_handler
 *
 *	The interrupt handler for processing the inter-cpu interrupt to the
 *	slave cpu which was spinning in the rendez loop.
 *	Since this spinning is done by turning off the interrupts and
 *	polling on the wakeup-interrupt bit in the IRR, there is
 *	nothing useful to be done in the handler.
 *
 *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
 *	arg		(Interrupt handler specific argument)
 *	ptregs		(Exception frame at the time of the interrupt)
 *  Outputs :   None
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
{
	return IRQ_HANDLED;
}
/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
	(void*,struct ia64_sal_os_state*)
	= NULL;

int
ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
{
	if (ia64_mca_ucmc_extension)
		return 1;

	ia64_mca_ucmc_extension = fn;
	return 0;
}

void
ia64_unreg_MCA_extension(void)
{
	if (ia64_mca_ucmc_extension)
		ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);
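/*
 * Illustrative sketch, not part of the original file: how a recovery module
 * would hook into ia64_mca_handler() via the extension slot above.  The
 * hook returns nonzero if it recovered the error (see the use of
 * ia64_mca_ucmc_extension in ia64_mca_handler below).  Both function names
 * are hypothetical; the block is compiled out.
 */
#if 0
static int my_recovery_hook(void *record, struct ia64_sal_os_state *sos)
{
	/* inspect the SAL error record, attempt recovery */
	return 0;	/* 0 => not recovered, MCA remains fatal */
}

static int __init my_recovery_init(void)
{
	/* ia64_reg_MCA_extension() returns 1 if the slot is already taken */
	return ia64_reg_MCA_extension(my_recovery_hook) ? -EBUSY : 0;
}
#endif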
static inline void
copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
{
	u64 fslot, tslot, nat;
	*tr = *fr;
	fslot = ((unsigned long)fr >> 3) & 63;
	tslot = ((unsigned long)tr >> 3) & 63;
	*tnat &= ~(1UL << tslot);
	nat = (fnat >> fslot) & 1;
	*tnat |= (nat << tslot);
}
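/*
 * Worked example, not part of the original file: copy_reg() keys the NaT
 * bit off bits 3..8 of the register save address, because the RSE assigns
 * one NaT bit per 8-byte slot.  For a register image saved at address
 * 0x...e3c8:
 *	(0xe3c8 >> 3) & 63 = 57
 * so its NaT travels in bit 57 of the corresponding unat/rnat collection
 * word.
 */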
/* On entry to this routine, we are running on the per cpu stack, see
 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 * the original stack's registers will be in the RBS on this stack.  This stack
 * also contains a partial pt_regs and switch_stack, the rest of the data is in
 * PAL minstate.
 *
 * The first thing to do is modify the original stack to look like a blocked
 * task so we can run backtrace on the original task.  Also mark the per cpu
 * stack as current to ensure that we use the correct task state, it also means
 * that we can do backtrace on the MCA/INIT handler code itself.
 */
static task_t *
ia64_mca_modify_original_stack(struct pt_regs *regs,
		const struct switch_stack *sw,
		struct ia64_sal_os_state *sos,
		const char *type)
{
	char *p, comm[sizeof(current->comm)];
	ia64_va va;
	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
	const pal_min_state_area_t *ms = sos->pal_min_state;
	task_t *previous_current;
	struct pt_regs *old_regs;
	struct switch_stack *old_sw;
	unsigned size = sizeof(struct pt_regs) +
		sizeof(struct switch_stack) + 16;
	u64 *old_bspstore, *old_bsp;
	u64 *new_bspstore, *new_bsp;
	u64 old_unat, old_rnat, new_rnat, nat;
	u64 slots, loadrs = regs->loadrs;
	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
	u64 ar_bspstore = regs->ar_bspstore;
	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
	const u64 *bank;
	const char *msg;
	int cpu = smp_processor_id();

	previous_current = curr_task(cpu);
	set_curr_task(cpu, current);
	if ((p = strchr(current->comm, ' ')))
		*p = '\0';
	/* Best effort attempt to cope with MCA/INIT delivered while in
	 * physical mode.
	 */
	regs->cr_ipsr = ms->pmsa_ipsr;
	if (ia64_psr(regs)->dt == 0) {
		/* Data translation was off: remap region 0 (physical)
		 * addresses as region 7 kernel virtual addresses.
		 */
		va.l = r12;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r12 = va.l;
		}
		va.l = r13;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r13 = va.l;
		}
	}
	if (ia64_psr(regs)->rt == 0) {
		/* RSE translation was off: likewise for the backing store */
		va.l = ar_bspstore;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bspstore = va.l;
		}
		va.l = ar_bsp;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bsp = va.l;
		}
	}
	/* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
	 * have been copied to the old stack, the old stack may fail the
	 * validation tests below.  So ia64_old_stack() must restore the dirty
	 * registers from the new stack.  The old and new bspstore probably
	 * have different alignments, so loadrs calculated on the old bsp
	 * cannot be used to restore from the new bsp.  Calculate a suitable
	 * loadrs for the new stack and save it in the new pt_regs, where
	 * ia64_old_stack() can get it.
	 */
	old_bspstore = (u64 *)ar_bspstore;
	old_bsp = (u64 *)ar_bsp;
	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
	new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET);
	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
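	/* Aside, not in the original: the loadrs value lives in bits 16 and
	 * up of ar.rsc and counts bytes of the dirty RSE partition,
	 * including the interleaved NaT collection words.  Hence the slot
	 * count above is multiplied by 8 (bytes per slot) and shifted left
	 * by 16; e.g. 96 slots yield a loadrs field of 0x3000000.
	 */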
	/* Verify the previous stack state before we change it */
	if (user_mode(regs)) {
		msg = "occurred in user space";
		goto no_mod;
	}
	if (r13 != sos->prev_IA64_KR_CURRENT) {
		msg = "inconsistent previous current and r13";
		goto no_mod;
	}
	if ((r12 - r13) >= KERNEL_STACK_SIZE) {
		msg = "inconsistent r12 and r13";
		goto no_mod;
	}
	if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
		msg = "inconsistent ar.bspstore and r13";
		goto no_mod;
	}
	va.p = old_bspstore;
	if (va.f.reg < 5) {
		msg = "old_bspstore is in the wrong region";
		goto no_mod;
	}
	if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
		msg = "inconsistent ar.bsp and r13";
		goto no_mod;
	}
	size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
	if (ar_bspstore + size > r12) {
		msg = "no room for blocked state";
		goto no_mod;
	}
	/* Change the comm field on the MCA/INIT task to include the pid that
	 * was interrupted, it makes for easier debugging.  If that pid was 0
	 * (swapper or nested MCA/INIT) then use the start of the previous comm
	 * field suffixed with its cpu.
	 */
	if (previous_current->pid)
		snprintf(comm, sizeof(comm), "%s %d",
			current->comm, previous_current->pid);
	else {
		int l;
		if ((p = strchr(previous_current->comm, ' ')))
			l = p - previous_current->comm;
		else
			l = strlen(previous_current->comm);
		snprintf(comm, sizeof(comm), "%s %*s %d",
			current->comm, l, previous_current->comm,
			previous_current->thread_info->cpu);
	}
	memcpy(current->comm, comm, sizeof(current->comm));
	/* Make the original task look blocked.  First stack a struct pt_regs,
	 * describing the state at the time of interrupt.  mca_asm.S built a
	 * partial pt_regs, copy it and fill in the blanks using minstate.
	 */
	p = (char *)r12 - sizeof(*regs);
	old_regs = (struct pt_regs *)p;
	memcpy(old_regs, regs, sizeof(*regs));
	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
	 * pmsa_{xip,xpsr,xfs}
	 */
	if (ia64_psr(regs)->ic) {
		old_regs->cr_iip = ms->pmsa_iip;
		old_regs->cr_ipsr = ms->pmsa_ipsr;
		old_regs->cr_ifs = ms->pmsa_ifs;
	} else {
		old_regs->cr_iip = ms->pmsa_xip;
		old_regs->cr_ipsr = ms->pmsa_xpsr;
		old_regs->cr_ifs = ms->pmsa_xfs;
	}
->pr
= ms
->pmsa_pr
;
773 old_regs
->b0
= ms
->pmsa_br0
;
774 old_regs
->loadrs
= loadrs
;
775 old_regs
->ar_rsc
= ms
->pmsa_rsc
;
776 old_unat
= old_regs
->ar_unat
;
777 copy_reg(&ms
->pmsa_gr
[1-1], ms
->pmsa_nat_bits
, &old_regs
->r1
, &old_unat
);
778 copy_reg(&ms
->pmsa_gr
[2-1], ms
->pmsa_nat_bits
, &old_regs
->r2
, &old_unat
);
779 copy_reg(&ms
->pmsa_gr
[3-1], ms
->pmsa_nat_bits
, &old_regs
->r3
, &old_unat
);
780 copy_reg(&ms
->pmsa_gr
[8-1], ms
->pmsa_nat_bits
, &old_regs
->r8
, &old_unat
);
781 copy_reg(&ms
->pmsa_gr
[9-1], ms
->pmsa_nat_bits
, &old_regs
->r9
, &old_unat
);
782 copy_reg(&ms
->pmsa_gr
[10-1], ms
->pmsa_nat_bits
, &old_regs
->r10
, &old_unat
);
783 copy_reg(&ms
->pmsa_gr
[11-1], ms
->pmsa_nat_bits
, &old_regs
->r11
, &old_unat
);
784 copy_reg(&ms
->pmsa_gr
[12-1], ms
->pmsa_nat_bits
, &old_regs
->r12
, &old_unat
);
785 copy_reg(&ms
->pmsa_gr
[13-1], ms
->pmsa_nat_bits
, &old_regs
->r13
, &old_unat
);
786 copy_reg(&ms
->pmsa_gr
[14-1], ms
->pmsa_nat_bits
, &old_regs
->r14
, &old_unat
);
787 copy_reg(&ms
->pmsa_gr
[15-1], ms
->pmsa_nat_bits
, &old_regs
->r15
, &old_unat
);
788 if (ia64_psr(old_regs
)->bn
)
789 bank
= ms
->pmsa_bank1_gr
;
791 bank
= ms
->pmsa_bank0_gr
;
792 copy_reg(&bank
[16-16], ms
->pmsa_nat_bits
, &old_regs
->r16
, &old_unat
);
793 copy_reg(&bank
[17-16], ms
->pmsa_nat_bits
, &old_regs
->r17
, &old_unat
);
794 copy_reg(&bank
[18-16], ms
->pmsa_nat_bits
, &old_regs
->r18
, &old_unat
);
795 copy_reg(&bank
[19-16], ms
->pmsa_nat_bits
, &old_regs
->r19
, &old_unat
);
796 copy_reg(&bank
[20-16], ms
->pmsa_nat_bits
, &old_regs
->r20
, &old_unat
);
797 copy_reg(&bank
[21-16], ms
->pmsa_nat_bits
, &old_regs
->r21
, &old_unat
);
798 copy_reg(&bank
[22-16], ms
->pmsa_nat_bits
, &old_regs
->r22
, &old_unat
);
799 copy_reg(&bank
[23-16], ms
->pmsa_nat_bits
, &old_regs
->r23
, &old_unat
);
800 copy_reg(&bank
[24-16], ms
->pmsa_nat_bits
, &old_regs
->r24
, &old_unat
);
801 copy_reg(&bank
[25-16], ms
->pmsa_nat_bits
, &old_regs
->r25
, &old_unat
);
802 copy_reg(&bank
[26-16], ms
->pmsa_nat_bits
, &old_regs
->r26
, &old_unat
);
803 copy_reg(&bank
[27-16], ms
->pmsa_nat_bits
, &old_regs
->r27
, &old_unat
);
804 copy_reg(&bank
[28-16], ms
->pmsa_nat_bits
, &old_regs
->r28
, &old_unat
);
805 copy_reg(&bank
[29-16], ms
->pmsa_nat_bits
, &old_regs
->r29
, &old_unat
);
806 copy_reg(&bank
[30-16], ms
->pmsa_nat_bits
, &old_regs
->r30
, &old_unat
);
807 copy_reg(&bank
[31-16], ms
->pmsa_nat_bits
, &old_regs
->r31
, &old_unat
);
	/* Next stack a struct switch_stack.  mca_asm.S built a partial
	 * switch_stack, copy it and fill in the blanks using pt_regs and
	 * minstate.
	 *
	 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
	 * ar.pfs is set to 0.
	 *
	 * unwind.c::unw_unwind() does special processing for interrupt frames.
	 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
	 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
	 * that this is documented, of course.  Set PRED_NON_SYSCALL in the
	 * switch_stack on the original stack so it will unwind correctly when
	 * unwind.c reads pt_regs.
	 *
	 * thread.ksp is updated to point to the synthesized switch_stack.
	 */
	p -= sizeof(struct switch_stack);
	old_sw = (struct switch_stack *)p;
	memcpy(old_sw, sw, sizeof(*sw));
	old_sw->caller_unat = old_unat;
	old_sw->ar_fpsr = old_regs->ar_fpsr;
	copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
	copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
	copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
	copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
	old_sw->b0 = (u64)ia64_leave_kernel;
	old_sw->b1 = ms->pmsa_br1;
	old_sw->ar_pfs = 0;
	old_sw->ar_unat = old_unat;
	old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
	previous_current->thread.ksp = (u64)p - 16;
	/* Finally copy the original stack's registers back to its RBS.
	 * Registers from ar.bspstore through ar.bsp at the time of the event
	 * are in the current RBS, copy them back to the original stack.  The
	 * copy must be done register by register because the original bspstore
	 * and the current one have different alignments, so the saved RNAT
	 * data occurs at different places.
	 *
	 * mca_asm does cover, so the old_bsp already includes all registers at
	 * the time of MCA/INIT.  It also does flushrs, so all registers before
	 * this function have been written to backing store on the MCA/INIT
	 * stack.
	 */
	new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
	old_rnat = regs->ar_rnat;
	while (slots--) {
		if (ia64_rse_is_rnat_slot(new_bspstore)) {
			new_rnat = ia64_get_rnat(new_bspstore++);
		}
		if (ia64_rse_is_rnat_slot(old_bspstore)) {
			*old_bspstore++ = old_rnat;
			old_rnat = 0;
		}
		nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
		old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
		old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
		*old_bspstore++ = *new_bspstore++;
	}
	old_sw->ar_bspstore = (unsigned long)old_bspstore;
	old_sw->ar_rnat = old_rnat;

	sos->prev_task = previous_current;
	return previous_current;

no_mod:
	printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
			smp_processor_id(), type, msg);
	return previous_current;
}
/* The monarch/slave interaction is based on monarch_cpu and requires that all
 * slaves have entered rendezvous before the monarch leaves.  If any cpu has
 * not entered rendezvous yet then wait a bit.  The assumption is that any
 * slave that has not rendezvoused after a reasonable time is never going to do
 * so.  In this context, slave includes cpus that respond to the MCA rendezvous
 * interrupt, as well as cpus that receive the INIT slave event.
 */

static void
ia64_wait_for_slaves(int monarch)
{
	int c, wait = 0;
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
			udelay(1000);		/* short wait first */
			wait = 1;
			break;
		}
	}
	if (!wait)
		return;
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
			udelay(5*1000000);	/* wait 5 seconds for slaves (arbitrary) */
			break;
		}
	}
}
/*
 * ia64_mca_handler
 *
 *	This is the uncorrectable machine check handler called from the OS_MCA
 *	dispatch code, which is in turn called from SAL_CHECK().
 *	This is the place where the core of OS MCA handling is done.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.  This handler code is supposed to be run only on the
 *	monarch processor.  Once the monarch is done with MCA handling
 *	further MCA logging is enabled by clearing logs.
 *	Monarch also has the duty of sending wakeup-IPIs to pull the
 *	slave processors out of rendezvous spinloop.
 */
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 struct ia64_sal_os_state *sos)
{
	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
		&sos->proc_state_param;
	int recover, cpu = smp_processor_id();
	task_t *previous_current;

	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
	monarch_cpu = cpu;
	ia64_wait_for_slaves(cpu);

	/* Wakeup all the processors which are spinning in the rendezvous loop.
	 * They will leave SAL, then spin in the OS with interrupts disabled
	 * until this monarch cpu leaves the MCA handler.  That gets control
	 * back to the OS so we can backtrace the other cpus, backtrace when
	 * spinning in SAL does not work.
	 */
	ia64_mca_wakeup_all();

	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

	/* A TLB error is recoverable only if it is the sole error recorded
	 * in this SAL error record.
	 */
	recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
		/* other error recovery */
		|| (ia64_mca_ucmc_extension
			&& ia64_mca_ucmc_extension(
				IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
				sos));

	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
		sos->os_status = IA64_MCA_CORRECTED;
	}

	set_curr_task(cpu, previous_current);
	monarch_cpu = -1;
}
static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
/*
 * ia64_mca_cmc_int_handler
 *
 *	This is the corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 *
 * Outputs
 *	None
 */
static irqreturn_t
ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cmc_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __FUNCTION__, cmc_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	/* Get the CMC error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);

	spin_lock(&cmc_history_lock);
	if (!cmc_polling_enabled) {
		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
			if (now - cmc_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
		if (count >= CMC_HISTORY_LENGTH) {

			cmc_polling_enabled = 1;
			spin_unlock(&cmc_history_lock);
			schedule_work(&cmc_disable_work);

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			return IRQ_HANDLED;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
	return IRQ_HANDLED;
}
/*
 * ia64_mca_cmc_int_caller
 *
 *	Triggered by sw interrupt from CMC polling routine.  Calls
 *	real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 * Outputs
 *	handled
 */
static irqreturn_t
ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
	static int start_count = -1;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

	ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/* If no log record, switch out of polling mode */
		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {

			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
			schedule_work(&cmc_enable_work);
			cmc_polling_enabled = 0;

		} else {

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
		}

		start_count = -1;
	}

	return IRQ_HANDLED;
}
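/*
 * Note, not in the original: the polling path daisy-chains across cpus.
 * The poll timer fires on one cpu, ia64_mca_cmc_poll sends an IPI to the
 * first online cpu, each ia64_mca_cmc_int_caller invocation forwards the
 * IPI to the next online cpu, and the last cpu in the chain decides
 * whether to rearm the timer or return to interrupt mode.
 */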
/*
 * ia64_mca_cmc_poll
 *
 *	Poll for Corrected Machine Checks (CMCs)
 *
 * Inputs   :   dummy (unused)
 * Outputs  :   None
 */
static void
ia64_mca_cmc_poll (unsigned long dummy)
{
	/* Trigger a CMC interrupt cascade */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}
/*
 * ia64_mca_cpe_int_caller
 *
 *	Triggered by sw interrupt from CPE polling routine.  Calls
 *	real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 * Outputs
 *	handled
 */
#ifdef CONFIG_ACPI

static irqreturn_t
ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	static int start_count = -1;
	static int poll_time = MIN_CPE_POLL_INTERVAL;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

	ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/*
		 * If a log was recorded, increase our polling frequency,
		 * otherwise, backoff or return to interrupt mode.
		 */
		if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
			poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
		} else if (cpe_vector < 0) {
			poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
		} else {
			poll_time = MIN_CPE_POLL_INTERVAL;

			printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
			enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
			cpe_poll_enabled = 0;
		}

		if (cpe_poll_enabled)
			mod_timer(&cpe_poll_timer, jiffies + poll_time);
		start_count = -1;
	}

	return IRQ_HANDLED;
}
/*
 * ia64_mca_cpe_poll
 *
 *	Poll for Corrected Platform Errors (CPEs), trigger the interrupt
 *	on the first cpu, from there it will trickle through all the cpus.
 *
 * Inputs   :   dummy (unused)
 * Outputs  :   None
 */
static void
ia64_mca_cpe_poll (unsigned long dummy)
{
	/* Trigger a CPE interrupt cascade */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}

#endif /* CONFIG_ACPI */
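/*
 * Note, not in the original: CPE polling adapts its period by binary
 * backoff between MIN_CPE_POLL_INTERVAL and MAX_CPE_POLL_INTERVAL.  A
 * sweep that found a new record halves the period; an idle sweep on a
 * platform without a CPEI doubles it; an idle sweep on a platform with a
 * CPEI re-enables the interrupt and stops polling altogether.
 */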
/*
 * C portion of the OS INIT handler
 *
 * Called from ia64_os_init_dispatch
 *
 * Inputs: pointer to pt_regs where processor info was saved.  SAL/OS state
 * for this event.  This code is used for both monarch and slave INIT events,
 * see sos->monarch.
 *
 * All INIT events switch to the INIT stack and change the previous process to
 * blocked status.  If one of the INIT events is the monarch then we are
 * probably processing the nmi button/command.  Use the monarch cpu to dump all
 * the processes.  The slave INIT events all spin until the monarch cpu
 * returns.  We can also get INIT slave events for MCA, in which case the MCA
 * process is the monarch.
 */
void
ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
		  struct ia64_sal_os_state *sos)
{
	static atomic_t slaves;
	static atomic_t monarchs;
	task_t *previous_current;
	int cpu = smp_processor_id(), c;
	struct task_struct *g, *t;

	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
	console_loglevel = 15;	/* make sure printks make it to console */

	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
		sos->proc_state_param, cpu, sos->monarch);
	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
	sos->os_status = IA64_INIT_RESUME;

	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * slaves.  The last slave that enters is promoted to be a monarch.
	 * Remove this code in September 2006, that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
		printk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
		       __FUNCTION__, cpu);
		atomic_dec(&slaves);
		sos->monarch = 1;
	}

	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * monarchs.  Second and subsequent monarchs are demoted to slaves.
	 * Remove this code in September 2006, that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
		printk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
		       __FUNCTION__, cpu);
		atomic_dec(&monarchs);
		sos->monarch = 0;
	}

	if (!sos->monarch) {
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
		while (monarch_cpu == -1)
		       cpu_relax();	/* spin until monarch enters */
		while (monarch_cpu != -1)
		       cpu_relax();	/* spin until monarch leaves */
		printk("Slave on cpu %d returning to normal service.\n", cpu);
		set_curr_task(cpu, previous_current);
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
		atomic_dec(&slaves);
		return;
	}

	monarch_cpu = cpu;
	/*
	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
	 * generated via the BMC's command-line interface, but since the console is on the
	 * same serial line, the user will need some time to switch out of the BMC before
	 * the dump begins.
	 */
	printk("Delaying for 5 seconds...\n");
	udelay(5*1000000);
	ia64_wait_for_slaves(cpu);
	printk(KERN_ERR "Processes interrupted by INIT -");
	for_each_online_cpu(c) {
		struct ia64_sal_os_state *s;
		t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
		s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
		g = s->prev_task;
		if (g) {
			if (g->pid)
				printk(" %d", g->pid);
			else
				printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
		}
	}
	printk("\n\n");
	if (read_trylock(&tasklist_lock)) {
		do_each_thread (g, t) {
			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
			show_stack(t, NULL);
		} while_each_thread (g, t);
		read_unlock(&tasklist_lock);
	}
	printk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
	atomic_dec(&monarchs);
	set_curr_task(cpu, previous_current);
	monarch_cpu = -1;
	return;
}
static int __init
ia64_mca_disable_cpe_polling(char *str)
{
	cpe_poll_enabled = 0;
	return 1;
}

__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
static struct irqaction cmci_irqaction = {
	.handler =	ia64_mca_cmc_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"cmc_hndlr"
};

static struct irqaction cmcp_irqaction = {
	.handler =	ia64_mca_cmc_int_caller,
	.flags =	SA_INTERRUPT,
	.name =		"cmc_poll"
};

static struct irqaction mca_rdzv_irqaction = {
	.handler =	ia64_mca_rendez_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"mca_rdzv"
};

static struct irqaction mca_wkup_irqaction = {
	.handler =	ia64_mca_wakeup_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"mca_wkup"
};

#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
	.handler =	ia64_mca_cpe_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"cpe_hndlr"
};

static struct irqaction mca_cpep_irqaction = {
	.handler =	ia64_mca_cpe_int_caller,
	.flags =	SA_INTERRUPT,
	.name =		"cpe_poll"
};
#endif /* CONFIG_ACPI */
/* Minimal format of the MCA/INIT stacks.  The pseudo processes that run on
 * these stacks can never sleep, they cannot return from the kernel to user
 * space, they do not appear in a normal ps listing.  So there is no need to
 * format most of the fields.
 */

static void
format_mca_init_stack(void *mca_data, unsigned long offset,
		const char *type, int cpu)
{
	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
	struct thread_info *ti;
	memset(p, 0, KERNEL_STACK_SIZE);
	ti = (struct thread_info *)((char *)p + IA64_TASK_SIZE);
	ti->flags = _TIF_MCA_INIT;
	ti->preempt_count = 1;
	ti->task = p;
	ti->cpu = cpu;
	p->thread_info = ti;
	p->state = TASK_UNINTERRUPTIBLE;
	__set_bit(cpu, &p->cpus_allowed);
	INIT_LIST_HEAD(&p->tasks);
	p->parent = p->real_parent = p->group_leader = p;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	strncpy(p->comm, type, sizeof(p->comm)-1);
}
/* Do per-CPU MCA-related initialization.  */

void __devinit
ia64_mca_cpu_init(void *cpu_data)
{
	void *pal_vaddr;

	if (smp_processor_id() == 0) {
		void *mca_data;
		int cpu;

		mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
					 * NR_CPUS + KERNEL_STACK_SIZE);
		mca_data = (void *)(((unsigned long)mca_data +
					KERNEL_STACK_SIZE - 1) &
				(-KERNEL_STACK_SIZE));
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			format_mca_init_stack(mca_data,
					offsetof(struct ia64_mca_cpu, mca_stack),
					"MCA", cpu);
			format_mca_init_stack(mca_data,
					offsetof(struct ia64_mca_cpu, init_stack),
					"INIT", cpu);
			__per_cpu_mca[cpu] = __pa(mca_data);
			mca_data += sizeof(struct ia64_mca_cpu);
		}
	}

	/*
	 * The MCA info structure was allocated earlier and its
	 * physical address saved in __per_cpu_mca[cpu].  Copy that
	 * address to ia64_mca_data so we can access it as a per-CPU
	 * variable.
	 */
	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];

	/*
	 * Stash away a copy of the PTE needed to map the per-CPU page.
	 * We may need it during MCA recovery.
	 */
	__get_cpu_var(ia64_mca_per_cpu_pte) =
		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));

	/*
	 * Also, stash away a copy of the PAL address and the PTE
	 * needed to map it.
	 */
	pal_vaddr = efi_get_pal_addr();
	if (!pal_vaddr)
		return;
	__get_cpu_var(ia64_mca_pal_base) =
		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
	__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
							      PAGE_KERNEL));
}
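/*
 * Note, not in the original: the pointer arithmetic in ia64_mca_cpu_init()
 * is the usual round-up idiom for a power-of-two size.  Assuming
 * KERNEL_STACK_SIZE is a power of two, (addr + KERNEL_STACK_SIZE - 1) &
 * (-KERNEL_STACK_SIZE) rounds addr up to the next stack boundary; e.g.
 * with KERNEL_STACK_SIZE = 0x8000, address 0x10100 rounds up to 0x18000.
 */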
/*
 * ia64_mca_init
 *
 *  Do all the system level mca specific initialization.
 *
 *	1. Register spinloop and wakeup request interrupt vectors
 *
 *	2. Register OS_MCA handler entry point
 *
 *	3. Register OS_INIT handler entry point
 *
 *  4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
 *
 *  Note that this initialization is done very early before some kernel
 *  services are available.
 *
 *  Inputs  :   None
 *
 *  Outputs :   None
 */
void __init
ia64_mca_init(void)
{
	ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
	ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
	int i;
	s64 rc;
	struct ia64_sal_retval isrv;
	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */

	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);

	/* Clear the Rendez checkin flag for all cpus */
	for(i = 0 ; i < NR_CPUS; i++)
		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

	/*
	 * Register the rendezvous spinloop and wakeup mechanism with SAL
	 */

	/* Register the rendezvous interrupt vector with SAL */
	while (1) {
		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
					      SAL_MC_PARAM_MECHANISM_INT,
					      IA64_MCA_RENDEZ_VECTOR,
					      timeout,
					      SAL_MC_PARAM_RZ_ALWAYS);
		rc = isrv.status;
		if (rc == 0)
			break;
		if (rc == -2) {
			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
				"%ld to %ld milliseconds\n", timeout, isrv.v0);
			timeout = isrv.v0;
			continue;
		}
		printk(KERN_ERR "Failed to register rendezvous interrupt "
		       "with SAL (status %ld)\n", rc);
		return;
	}

	/* Register the wakeup interrupt vector with SAL */
	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
				      SAL_MC_PARAM_MECHANISM_INT,
				      IA64_MCA_WAKEUP_VECTOR,
				      0, 0);
	rc = isrv.status;
	if (rc) {
		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);

	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
	/*
	 * XXX - disable SAL checksum by setting size to 0; should be
	 * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
	 */
	ia64_mc_info.imi_mca_handler_size = 0;

	/* Register the os mca handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
				       ia64_mc_info.imi_mca_handler,
				       ia64_tpa(mca_hldlr_ptr->gp),
				       ia64_mc_info.imi_mca_handler_size,
				       0, 0, 0)))
	{
		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));

	/*
	 * XXX - disable SAL checksum by setting size to 0, should be
	 * size of the actual init handler in mca_asm.S.
	 */
	ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp);
	ia64_mc_info.imi_monarch_init_handler_size = 0;
	ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
	ia64_mc_info.imi_slave_init_handler_size = 0;

	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
		       ia64_mc_info.imi_monarch_init_handler);

	/* Register the os init handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
				       ia64_mc_info.imi_monarch_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_monarch_init_handler_size,
				       ia64_mc_info.imi_slave_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_slave_init_handler_size)))
	{
		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);

	/*
	 * Configure the CMCI/P vector and handler.  Interrupts for CMC are
	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
	 */
	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP */

	/* Setup the MCA rendezvous interrupt vector */
	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);

	/* Setup the MCA wakeup interrupt vector */
	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P handler */
	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif

	/* Initialize the areas set aside by the OS to buffer the
	 * platform/processor error states for MCA/INIT/CMC
	 * handling.
	 */
	ia64_log_init(SAL_INFO_TYPE_MCA);
	ia64_log_init(SAL_INFO_TYPE_INIT);
	ia64_log_init(SAL_INFO_TYPE_CMC);
	ia64_log_init(SAL_INFO_TYPE_CPE);

	printk(KERN_INFO "MCA related initialization done\n");
}
/*
 * ia64_mca_late_init
 *
 *	Opportunity to setup things that require initialization later
 *	than ia64_mca_init.  Setup a timer to poll for CPEs if the
 *	platform doesn't support an interrupt driven mechanism.
 *
 *  Inputs  :   None
 *  Outputs :   Status
 */
static int __init
ia64_mca_late_init(void)
{
	/* Setup the CMCI/P vector and handler */
	init_timer(&cmc_poll_timer);
	cmc_poll_timer.function = ia64_mca_cmc_poll;

	/* Unmask/enable the vector */
	cmc_polling_enabled = 0;
	schedule_work(&cmc_enable_work);

	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P vector and handler */
	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
	init_timer(&cpe_poll_timer);
	cpe_poll_timer.function = ia64_mca_cpe_poll;

	{
		irq_desc_t *desc;
		unsigned int irq;

		if (cpe_vector >= 0) {
			/* If platform supports CPEI, enable the irq. */
			cpe_poll_enabled = 0;
			for (irq = 0; irq < NR_IRQS; ++irq)
				if (irq_to_vector(irq) == cpe_vector) {
					desc = irq_descp(irq);
					desc->status |= IRQ_PER_CPU;
					setup_irq(irq, &mca_cpe_irqaction);
				}
			ia64_mca_register_cpev(cpe_vector);
			IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
		} else {
			/* If platform doesn't support CPEI, get the timer going. */
			if (cpe_poll_enabled) {
				ia64_mca_cpe_poll(0UL);
				IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
			}
		}
	}
#endif

	return 0;
}

device_initcall(ia64_mca_late_init);