/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/idle.h>
#include <asm/therm_throt.h>

asmlinkage void smp_thermal_interrupt(void)
{
	__u64 msr_val;

	ack_APIC_irq();

	exit_idle();
	irq_enter();

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
	if (therm_throt_process(msr_val & 1))
		mce_log_therm_throt_event(msr_val);

	inc_irq_stat(irq_thermal_count);
	irq_exit();
}

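/*
 * A note on the status check above: per the SDM, bit 0 of
 * MSR_IA32_THERM_STATUS is the live thermal status flag, set while the
 * sensor trip point is active. therm_throt_process() rate-limits the
 * throttling messages, so mce_log_therm_throt_event() only records an
 * event when that filter lets one through.
 */
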
static void intel_init_thermal(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int tm2 = 0;
	unsigned int cpu = smp_processor_id();

	if (!cpu_has(c, X86_FEATURE_ACPI))
		return;

	if (!cpu_has(c, X86_FEATURE_ACC))
		return;

	/*
	 * First check if TM1 is already enabled by the BIOS, in which
	 * case there might be some SMM goo that handles it, so we can't
	 * even install a handler since the interrupt might be delivered
	 * via SMI already.
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	h = apic_read(APIC_LVTTHMR);
	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
		return;
	}

	if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
		tm2 = 1;

	if (h & APIC_VECTOR_MASK) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal LVT vector (%#x) already installed\n",
		       cpu, (h & APIC_VECTOR_MASK));
		return;
	}

	/* Set up the thermal LVT entry, masked until we are ready */
	h = THERMAL_APIC_VECTOR;
	h |= (APIC_DM_FIXED | APIC_LVT_MASKED);
	apic_write(APIC_LVTTHMR, h);

	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
	wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);

	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

	/* Unmask the thermal vector */
	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

	printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
	       cpu, tm2 ? "TM2" : "TM1");

	/* enable thermal throttle processing */
	atomic_set(&therm_throt_en, 1);
}

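/*
 * For reference, the sequence above is the usual one for bringing up
 * an LVT entry: program the vector with APIC_LVT_MASKED set, enable
 * the interrupt source (the 0x03 written to MSR_IA32_THERM_INTERRUPT
 * sets bits 0 and 1, the high- and low-temperature interrupt enables
 * per the SDM), then unmask the LVT entry, so nothing can fire before
 * the setup is complete.
 */
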
/*
 * Support for Intel Corrected Machine Check Interrupts (CMCI). This
 * allows the CPU to raise an interrupt when a corrected machine check
 * happened. Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_SPINLOCK(cmci_discover_lock);

#define CMCI_THRESHOLD 1

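/*
 * For reference, in MSR_IA32_MC0_CTL2 + bank the low bits (14:0 per
 * the SDM) hold the corrected-error count threshold and bit 30 is the
 * CMCI enable (CMCI_EN). A threshold of 1 as used here means every
 * corrected event raises an interrupt, rather than one interrupt per
 * batch of events.
 */
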
static int cmci_supported(int *banks)
{
	u64 cap;

	/*
	 * The vendor check is not strictly needed, but the rest of the
	 * initialization is vendor keyed and this makes sure none of
	 * these back doors are entered on other vendors' CPUs.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;
	if (!cpu_has_apic || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	mce_notify_user();
}

static void print_update(char *type, int *hdr, int num)
{
	if (*hdr == 0)
		printk(KERN_INFO "CPU %d MCA banks", smp_processor_id());
	*hdr = 1;
	printk(KERN_CONT " %s:%d", type, num);
}

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
static void cmci_discover(int banks, int boot)
{
	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
	int hdr = 0;
	int i;

	spin_lock(&cmci_discover_lock);
	for (i = 0; i < banks; i++) {
		u64 val;

		if (test_bit(i, owned))
			continue;

		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);

		/* Already owned by someone else? */
		if (val & CMCI_EN) {
			if (test_and_clear_bit(i, owned) || boot)
				print_update("SHD", &hdr, i);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
			continue;
		}

		val |= CMCI_EN | CMCI_THRESHOLD;
		wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);

		/* Did the enable bit stick? -- the bank supports CMCI */
		if (val & CMCI_EN) {
			if (!test_and_set_bit(i, owned) || boot)
				print_update("CMCI", &hdr, i);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
		} else {
			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
		}
	}
	spin_unlock(&cmci_discover_lock);
	if (hdr)
		printk(KERN_CONT "\n");
}

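/*
 * To summarize the ownership protocol above: CMCI_EN in a bank's CTL2
 * MSR is state shared between all CPUs that see the bank. The first
 * CPU to set it owns the bank; a CPU that finds it already set treats
 * the bank as owned by a sibling (SHD); and if the bit does not stick
 * after a write, the bank has no CMCI support and stays with the
 * polling timer.
 */
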
/*
 * Just in case we missed an event during initialization, check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
		return;
	local_irq_save(flags);
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	local_irq_restore(flags);
}

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	int i;
	int banks;
	u64 val;

	if (!cmci_supported(&banks))
		return;
	spin_lock(&cmci_discover_lock);
	for (i = 0; i < banks; i++) {
		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
			continue;
		/* Disable CMCI */
		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
		val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK);
		wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
		__clear_bit(i, __get_cpu_var(mce_banks_owned));
	}
	spin_unlock(&cmci_discover_lock);
}

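/*
 * cmci_clear() above and cmci_rediscover() below pair up across a CPU
 * hotplug event: the CPU going down drops its claims, and a surviving
 * CPU then walks the others so the orphaned banks get re-owned.
 */
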
/*
 * After a CPU went down, cycle through all the others and rediscover.
 * Must run in process context.
 */
void cmci_rediscover(int dying)
{
	int banks;
	int cpu;
	cpumask_var_t old;

	if (!cmci_supported(&banks))
		return;
	if (!alloc_cpumask_var(&old, GFP_KERNEL))
		return;
	cpumask_copy(old, &current->cpus_allowed);

	for_each_online_cpu(cpu) {
		if (cpu == dying)
			continue;
		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
			continue;
		/* Recheck banks in case CPUs don't all have the same banks */
		if (cmci_supported(&banks))
			cmci_discover(banks, 0);
	}

	set_cpus_allowed_ptr(current, old);
	free_cpumask_var(old);
}

/*
 * Re-enable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
	int banks;

	if (cmci_supported(&banks))
		cmci_discover(banks, 0);
}

static void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks, 1);
	/*
	 * For CPU #0 this runs with the APIC still disabled, but that's
	 * OK because only the vector is set up. We still do another
	 * check for the banks later for CPU #0 just to make sure
	 * we don't miss any events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}

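/*
 * Note that the LVT CMCI entry above is programmed unmasked with
 * fixed delivery right away; that is safe because cmci_discover()
 * has already claimed our banks, so any interrupt that arrives finds
 * the per-CPU ownership state initialized.
 */
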
void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_thermal(c);
	intel_init_cmci();
}