/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog and mcelog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 *          Inspired by Ross Biro's and Al Borchers' counter code.
 */
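/*
 * For reference: with CONFIG_SYSFS, the per-CPU counters defined below show
 * up as read-only files such as
 * /sys/devices/system/cpu/cpuN/thermal_throttle/core_throttle_count
 * (the group name and the CPU device parent are set up later in this file).
 */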
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL		(300 * HZ)

#define THERMAL_THROTTLING_EVENT	0
#define POWER_LIMIT_EVENT		1
/*
 * Current thermal event state:
 */
struct _thermal_state {
	bool			new_event;
	int			event;
	u64			next_check;
	unsigned long		count;
	unsigned long		last_count;
};

struct thermal_state {
	struct _thermal_state core_throttle;
	struct _thermal_state core_power_limit;
	struct _thermal_state package_throttle;
	struct _thermal_state package_power_limit;
	struct _thermal_state core_thresh0;
	struct _thermal_state core_thresh1;
	struct _thermal_state pkg_thresh0;
	struct _thermal_state pkg_thresh1;
};
/* Callback to handle core threshold interrupts */
int (*platform_thermal_notify)(__u64 msr_val);
EXPORT_SYMBOL(platform_thermal_notify);

/* Callback to handle core package threshold interrupts */
int (*platform_thermal_package_notify)(__u64 msr_val);
EXPORT_SYMBOL_GPL(platform_thermal_package_notify);

/*
 * Callback support for rate control: returns true if the callback
 * implements its own rate limiting.
 */
bool (*platform_thermal_package_rate_control)(void);
EXPORT_SYMBOL_GPL(platform_thermal_package_rate_control);
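/*
 * A platform thermal driver (x86_pkg_temp_thermal is one in-tree consumer)
 * hooks these callbacks roughly as sketched below; pkg_temp_notify is an
 * illustrative name, not something defined in this file:
 *
 *	static int pkg_temp_notify(__u64 msr_val)
 *	{
 *		... schedule work to read the package temperature, etc. ...
 *		return 0;
 *	}
 *
 *	platform_thermal_package_notify = pkg_temp_notify;
 */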
static DEFINE_PER_CPU(struct thermal_state, thermal_state);

static atomic_t therm_throt_en	= ATOMIC_INIT(0);

static u32 lvtthmr_init __read_mostly;
#ifdef CONFIG_SYSFS
#define define_therm_throt_device_one_ro(_name)				\
	static DEVICE_ATTR(_name, 0444,					\
			   therm_throt_device_show_##_name,		\
			   NULL)

#define define_therm_throt_device_show_func(event, name)		\
									\
static ssize_t therm_throt_device_show_##event##_##name(		\
			struct device *dev,				\
			struct device_attribute *attr,			\
			char *buf)					\
{									\
	unsigned int cpu = dev->id;					\
	ssize_t ret;							\
									\
	preempt_disable();	/* CPU hotplug */			\
	if (cpu_online(cpu)) {						\
		ret = sprintf(buf, "%lu\n",				\
			      per_cpu(thermal_state, cpu).event.name);	\
	} else								\
		ret = 0;						\
	preempt_enable();						\
									\
	return ret;							\
}
define_therm_throt_device_show_func(core_throttle, count);
define_therm_throt_device_one_ro(core_throttle_count);

define_therm_throt_device_show_func(core_power_limit, count);
define_therm_throt_device_one_ro(core_power_limit_count);

define_therm_throt_device_show_func(package_throttle, count);
define_therm_throt_device_one_ro(package_throttle_count);

define_therm_throt_device_show_func(package_power_limit, count);
define_therm_throt_device_one_ro(package_power_limit_count);
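/*
 * Each show_func/one_ro pair above expands into a show routine that reads
 * the named per-CPU counter plus a matching read-only (0444) DEVICE_ATTR;
 * e.g. core_throttle_count becomes dev_attr_core_throttle_count, backed by
 * therm_throt_device_show_core_throttle_count().
 */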
static struct attribute *thermal_throttle_attrs[] = {
	&dev_attr_core_throttle_count.attr,
	NULL
};

static struct attribute_group thermal_attr_group = {
	.attrs	= thermal_throttle_attrs,
	.name	= "thermal_throttle"
};
#endif /* CONFIG_SYSFS */
#define CORE_LEVEL	0
#define PACKAGE_LEVEL	1
/***
 * therm_throt_process - Process thermal throttling event from interrupt
 * @new_event: Whether the condition is currently asserted (boolean), since
 *             the thermal interrupt normally fires both when the thermal
 *             event begins and once the event has ended.
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 *
 * Returns: 0 : Event should NOT be further logged, i.e. still in
 *              "timeout" from previous log message.
 *          1 : Event should be logged further, and a message has been
 *              printed to the syslog.
 */
static int therm_throt_process(bool new_event, int event, int level)
{
	struct _thermal_state *state;
	unsigned int this_cpu = smp_processor_id();
	bool old_event;
	u64 now;
	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);

	now = get_jiffies_64();
	if (level == CORE_LEVEL) {
		if (event == THERMAL_THROTTLING_EVENT)
			state = &pstate->core_throttle;
		else if (event == POWER_LIMIT_EVENT)
			state = &pstate->core_power_limit;
		else
			return 0;
	} else if (level == PACKAGE_LEVEL) {
		if (event == THERMAL_THROTTLING_EVENT)
			state = &pstate->package_throttle;
		else if (event == POWER_LIMIT_EVENT)
			state = &pstate->package_power_limit;
		else
			return 0;
	} else
		return 0;

	old_event = state->new_event;
	state->new_event = new_event;

	if (new_event)
		state->count++;

	if (time_before64(now, state->next_check) &&
			state->count != state->last_count)
		return 0;

	state->next_check = now + CHECK_INTERVAL;
	state->last_count = state->count;

	/* if we just entered the thermal event */
	if (new_event) {
		if (event == THERMAL_THROTTLING_EVENT)
			pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package",
				state->count);
		return 1;
	}
	if (old_event) {
		if (event == THERMAL_THROTTLING_EVENT)
			pr_info("CPU%d: %s temperature/speed normal\n", this_cpu,
				level == CORE_LEVEL ? "Core" : "Package");
		return 1;
	}

	return 0;
}
static int thresh_event_valid(int level, int event)
{
	struct _thermal_state *state;
	unsigned int this_cpu = smp_processor_id();
	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
	u64 now = get_jiffies_64();

	if (level == PACKAGE_LEVEL)
		state = (event == 0) ? &pstate->pkg_thresh0 :
				       &pstate->pkg_thresh1;
	else
		state = (event == 0) ? &pstate->core_thresh0 :
				       &pstate->core_thresh1;

	if (time_before64(now, state->next_check))
		return 0;

	state->next_check = now + CHECK_INTERVAL;

	return 1;
}
static bool int_pln_enable;
static int __init int_pln_enable_setup(char *s)
{
	int_pln_enable = true;

	return 1;
}
__setup("int_pln_enable", int_pln_enable_setup);
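/*
 * Usage: power limit notification (PLN) handling is off by default; booting
 * with "int_pln_enable" on the kernel command line turns it on for CPUs
 * that have X86_FEATURE_PLN.
 */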
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
{
	int err;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
	if (err)
		return err;

	if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_core_power_limit_count.attr,
					      thermal_attr_group.name);
	if (cpu_has(c, X86_FEATURE_PTS)) {
		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_package_throttle_count.attr,
					      thermal_attr_group.name);
		if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
			err = sysfs_add_file_to_group(&dev->kobj,
					&dev_attr_package_power_limit_count.attr,
					thermal_attr_group.name);
	}

	return err;
}
static void thermal_throttle_remove_dev(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &thermal_attr_group);
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
			      unsigned long action,
			      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;
	int err = 0;

	dev = get_cpu_device(cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = thermal_throttle_add_dev(dev, cpu);
		WARN_ON(err);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		thermal_throttle_remove_dev(dev);
		break;
	}
	return notifier_from_errno(err);
}
static struct notifier_block thermal_throttle_cpu_notifier =
{
	.notifier_call = thermal_throttle_cpu_callback,
};
/*
 * Runs as a device_initcall, i.e. after intel_init_thermal() has had a
 * chance to set therm_throt_en during early CPU init.
 */
static __init int thermal_throttle_init_device(void)
{
	unsigned int cpu = 0;
	int err;

	if (!atomic_read(&therm_throt_en))
		return 0;

	cpu_notifier_register_begin();

	/* connect live CPUs to sysfs */
	for_each_online_cpu(cpu) {
		err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
		WARN_ON(err);
	}

	__register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
	cpu_notifier_register_done();

	return 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */
static void notify_package_thresholds(__u64 msr_val)
{
	bool notify_thres_0 = false;
	bool notify_thres_1 = false;

	if (!platform_thermal_package_notify)
		return;

	/* lower threshold check */
	if (msr_val & THERM_LOG_THRESHOLD0)
		notify_thres_0 = true;
	/* higher threshold check */
	if (msr_val & THERM_LOG_THRESHOLD1)
		notify_thres_1 = true;

	if (!notify_thres_0 && !notify_thres_1)
		return;

	if (platform_thermal_package_rate_control &&
	    platform_thermal_package_rate_control()) {
		/* Rate control is implemented in callback */
		platform_thermal_package_notify(msr_val);
		return;
	}

	/* lower threshold reached */
	if (notify_thres_0 && thresh_event_valid(PACKAGE_LEVEL, 0))
		platform_thermal_package_notify(msr_val);
	/* higher threshold reached */
	if (notify_thres_1 && thresh_event_valid(PACKAGE_LEVEL, 1))
		platform_thermal_package_notify(msr_val);
}
static void notify_thresholds(__u64 msr_val)
{
	/* check whether the interrupt handler is defined;
	 * otherwise simply return
	 */
	if (!platform_thermal_notify)
		return;

	/* lower threshold reached */
	if ((msr_val & THERM_LOG_THRESHOLD0) &&
	    thresh_event_valid(CORE_LEVEL, 0))
		platform_thermal_notify(msr_val);
	/* higher threshold reached */
	if ((msr_val & THERM_LOG_THRESHOLD1) &&
	    thresh_event_valid(CORE_LEVEL, 1))
		platform_thermal_notify(msr_val);
}
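/*
 * Note the asymmetry with notify_package_thresholds() above: the package
 * path lets the callback take over rate control entirely, while core
 * threshold events are always rate limited here via thresh_event_valid().
 */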
/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
{
	__u64 msr_val;

	/* HWP events are also delivered via the thermal vector; ack them */
	if (static_cpu_has(X86_FEATURE_HWP))
		wrmsrl_safe(MSR_HWP_STATUS, 0);

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);

	/* Check for violation of core thermal thresholds */
	notify_thresholds(msr_val);

	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
				THERMAL_THROTTLING_EVENT,
				CORE_LEVEL) != 0)
		mce_log_therm_throt_event(msr_val);

	if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
		therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
					POWER_LIMIT_EVENT,
					CORE_LEVEL);

	if (this_cpu_has(X86_FEATURE_PTS)) {
		rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
		/* check violations of package thermal thresholds */
		notify_package_thresholds(msr_val);
		therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
					THERMAL_THROTTLING_EVENT,
					PACKAGE_LEVEL);
		if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
			therm_throt_process(msr_val &
					PACKAGE_THERM_STATUS_POWER_LIMIT,
					POWER_LIMIT_EVENT,
					PACKAGE_LEVEL);
	}
}
static void unexpected_thermal_interrupt(void)
{
	pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
		smp_processor_id());
}

static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
static inline void __smp_thermal_interrupt(void)
{
	inc_irq_stat(irq_thermal_count);
	smp_thermal_vector();
}

asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs)
{
	entering_irq();
	__smp_thermal_interrupt();
	exiting_ack_irq();
}

asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs)
{
	entering_irq();
	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
	__smp_thermal_interrupt();
	trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
	exiting_ack_irq();
}
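/*
 * Both entry points above wrap the same handler; the _trace variant only
 * adds the thermal_apic entry/exit tracepoints around it.
 */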
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_APIC))
		return 0;
	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
		return 0;
	return 1;
}
void __init mcheck_intel_therm_init(void)
{
	/*
	 * This function is only called on boot CPU. Save the init thermal
	 * LVT value on BSP and use that value to restore APs' thermal LVT
	 * entry BIOS programmed later
	 */
	if (intel_thermal_supported(&boot_cpu_data))
		lvtthmr_init = apic_read(APIC_LVTTHMR);
}
void intel_init_thermal(struct cpuinfo_x86 *c)
{
	unsigned int cpu = smp_processor_id();
	int tm2 = 0;
	u32 l, h;

	if (!intel_thermal_supported(c))
		return;

	/*
	 * First check if its enabled already, in which case there might
	 * be some SMM goo which handles it, so we can't even put a handler
	 * since it might be delivered via SMI already:
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);

	h = lvtthmr_init;
	/*
	 * The initial value of thermal LVT entries on all APs always reads
	 * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
	 * sequence to them and LVT registers are reset to 0s except for
	 * the mask bits which are set to 1s when APs receive INIT IPI.
	 * If BIOS takes over the thermal interrupt and sets its interrupt
	 * delivery mode to SMI (not fixed), it restores the value that the
	 * BIOS has programmed on AP based on BSP's info we saved since BIOS
	 * is always setting the same value for all threads/cores.
	 */
	if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
		apic_write(APIC_LVTTHMR, lvtthmr_init);

	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
		if (system_state == SYSTEM_BOOTING)
			pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu);
		return;
	}

	/* early Pentium M models use different method for enabling TM2 */
	if (cpu_has(c, X86_FEATURE_TM2)) {
		if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
			rdmsr(MSR_THERM2_CTL, l, h);
			if (l & MSR_THERM2_CTL_TM_SELECT)
				tm2 = 1;
		} else if (l & MSR_IA32_MISC_ENABLE_TM2)
			tm2 = 1;
	}

	/* We'll mask the thermal vector in the lapic till we're ready: */
	h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
	apic_write(APIC_LVTTHMR, h);

	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
	if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
		wrmsr(MSR_IA32_THERM_INTERRUPT,
			(l | (THERM_INT_LOW_ENABLE
			| THERM_INT_HIGH_ENABLE)) & ~THERM_INT_PLN_ENABLE, h);
	else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
		wrmsr(MSR_IA32_THERM_INTERRUPT,
			l | (THERM_INT_LOW_ENABLE
			| THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
	else
		wrmsr(MSR_IA32_THERM_INTERRUPT,
		      l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

	if (cpu_has(c, X86_FEATURE_PTS)) {
		rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
		if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
				(l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE))
				& ~PACKAGE_THERM_INT_PLN_ENABLE, h);
		else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
				l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE
				| PACKAGE_THERM_INT_PLN_ENABLE), h);
		else
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
			      l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE), h);
	}

	smp_thermal_vector = intel_thermal_interrupt;

	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

	/* Unmask the thermal vector: */
	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

	pr_info_once("CPU0: Thermal monitoring enabled (%s)\n",
		     tm2 ? "TM2" : "TM1");

	/* enable thermal throttle processing */
	atomic_set(&therm_throt_en, 1);
}