x86, mce: squash mce_intel.c into therm_throt.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog and mcelog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 *          Inspired by Ross Biro's and Al Borchers' counter code.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/therm_throt.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>

/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL		(300 * HZ)

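/*
 * Per-CPU state: next_check is the jiffies value after which the next
 * throttling message may be logged by therm_throt_process(), and
 * thermal_throttle_count is the total number of throttle events seen on
 * this CPU (exported through sysfs when CONFIG_SYSFS is enabled).
 */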
static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);

atomic_t therm_throt_en = ATOMIC_INIT(0);

#ifdef CONFIG_SYSFS
#define define_therm_throt_sysdev_one_ro(_name)				\
	static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)

#define define_therm_throt_sysdev_show_func(name)			\
static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev,	\
					      struct sysdev_attribute *attr, \
					      char *buf)		\
{									\
	unsigned int cpu = dev->id;					\
	ssize_t ret;							\
									\
	preempt_disable();	/* CPU hotplug */			\
	if (cpu_online(cpu))						\
		ret = sprintf(buf, "%lu\n",				\
			      per_cpu(thermal_throttle_##name, cpu));	\
	else								\
		ret = 0;						\
	preempt_enable();						\
									\
	return ret;							\
}

define_therm_throt_sysdev_show_func(count);
define_therm_throt_sysdev_one_ro(count);

static struct attribute *thermal_throttle_attrs[] = {
	&attr_count.attr,
	NULL
};

static struct attribute_group thermal_throttle_attr_group = {
	.attrs	= thermal_throttle_attrs,
	.name	= "thermal_throttle"
};
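
/*
 * This attribute group is attached to each CPU's sysdev by
 * thermal_throttle_add_dev() below, so the counter is typically exposed
 * to userspace as /sys/devices/system/cpu/cpuN/thermal_throttle/count.
 */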
#endif /* CONFIG_SYSFS */

/***
 * therm_throt_process - Process thermal throttling event from interrupt
 * @curr: Whether the condition is current or not (boolean), since the
 *        thermal interrupt normally gets called both when the thermal
 *        event begins and once the event has ended.
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 *
 * Returns: 0 : Event should NOT be further logged, i.e. still in
 *              "timeout" from previous log message.
 *          1 : Event should be logged further, and a message has been
 *              printed to the syslog.
 */
int therm_throt_process(int curr)
{
	unsigned int cpu = smp_processor_id();
	__u64 tmp_jiffs = get_jiffies_64();

	if (curr)
		__get_cpu_var(thermal_throttle_count)++;

	if (time_before64(tmp_jiffs, __get_cpu_var(next_check)))
		return 0;

	__get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;

	/* if we just entered the thermal event */
	if (curr) {
		printk(KERN_CRIT "CPU%d: Temperature above threshold, "
		       "cpu clock throttled (total events = %lu)\n", cpu,
		       __get_cpu_var(thermal_throttle_count));

		add_taint(TAINT_MACHINE_CHECK);
	} else {
		printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu);
	}

	return 1;
}

#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
{
	return sysfs_create_group(&sys_dev->kobj,
				  &thermal_throttle_attr_group);
}

static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
{
	sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
}

/* Mutex protecting device creation against CPU hotplug: */
static DEFINE_MUTEX(therm_cpu_lock);

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static __cpuinit int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
			      unsigned long action,
			      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;
	int err = 0;

	sys_dev = get_cpu_sysdev(cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&therm_cpu_lock);
		err = thermal_throttle_add_dev(sys_dev);
		mutex_unlock(&therm_cpu_lock);
		WARN_ON(err);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		mutex_lock(&therm_cpu_lock);
		thermal_throttle_remove_dev(sys_dev);
		mutex_unlock(&therm_cpu_lock);
		break;
	}
	return err ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
{
	.notifier_call = thermal_throttle_cpu_callback,
};

static __init int thermal_throttle_init_device(void)
{
	unsigned int cpu = 0;
	int err;

	if (!atomic_read(&therm_throt_en))
		return 0;

	register_hotcpu_notifier(&thermal_throttle_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&therm_cpu_lock);
#endif
	/* connect live CPUs to sysfs */
	for_each_online_cpu(cpu) {
		err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
		WARN_ON(err);
	}
#ifdef CONFIG_HOTPLUG_CPU
	mutex_unlock(&therm_cpu_lock);
#endif

	return 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */

/* Thermal transition interrupt handler */
void intel_thermal_interrupt(void)
{
	__u64 msr_val;

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT))
		mce_log_therm_throt_event(msr_val);
}
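
/*
 * Note: THERM_STATUS_PROCHOT is the PROCHOT status flag in
 * IA32_THERM_STATUS, so therm_throt_process() is passed a non-zero value
 * while the CPU is actually being throttled and zero once the thermal
 * event has ended (both transitions raise the interrupt, see the
 * THERM_INT_LOW_ENABLE/THERM_INT_HIGH_ENABLE setup below).
 */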

static void unexpected_thermal_interrupt(void)
{
	printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
	       smp_processor_id());
	add_taint(TAINT_MACHINE_CHECK);
}

static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;

asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
	exit_idle();
	irq_enter();
	inc_irq_stat(irq_thermal_count);
	smp_thermal_vector();
	irq_exit();
	/* Ack only at the end to avoid potential reentry */
	ack_APIC_irq();
}
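
/*
 * The inc_irq_stat(irq_thermal_count) above feeds the per-CPU thermal
 * interrupt counter reported in /proc/interrupts (typically the "TRM" line).
 */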

void intel_set_thermal_handler(void)
{
	smp_thermal_vector = intel_thermal_interrupt;
}

void intel_init_thermal(struct cpuinfo_x86 *c)
{
	unsigned int cpu = smp_processor_id();
	int tm2 = 0;
	u32 l, h;

	/* Thermal monitoring depends on ACPI and clock modulation */
	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
		return;

	/*
	 * First check if it's enabled already, in which case there might
	 * be some SMM goo which handles it, so we can't even put a handler
	 * since it might be delivered via SMI already:
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	h = apic_read(APIC_LVTTHMR);
	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
		return;
	}

	if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
		tm2 = 1;

	/* Check whether a vector already exists */
	if (h & APIC_VECTOR_MASK) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal LVT vector (%#x) already installed\n",
		       cpu, (h & APIC_VECTOR_MASK));
		return;
	}

	/* We'll mask the thermal vector in the lapic till we're ready: */
	h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
	apic_write(APIC_LVTTHMR, h);

	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
	wrmsr(MSR_IA32_THERM_INTERRUPT,
	      l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

	intel_set_thermal_handler();

	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

	/* Unmask the thermal vector: */
	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

	printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
	       cpu, tm2 ? "TM2" : "TM1");

	/* enable thermal throttle processing */
	atomic_set(&therm_throt_en, 1);
}